| author | arcadia-devtools <arcadia-devtools@yandex-team.ru> | 2022-05-25 09:07:37 +0300 |
|---|---|---|
| committer | arcadia-devtools <arcadia-devtools@yandex-team.ru> | 2022-05-25 09:07:37 +0300 |
| commit | 8583a3a0c95359629a3fd0816588a3f550ff4489 (patch) | |
| tree | 0c031ebc7e67f2f32fd9c24d9d563d9d70e615d1 /contrib | |
| parent | b1826fcfe6cc1ada0fa9bd0fc4366ef04e2c52d9 (diff) | |
| download | ydb-8583a3a0c95359629a3fd0816588a3f550ff4489.tar.gz | |
intermediate changes
ref:0bc6b460c17cb0983ee7c2afa9cd22398e601bc8
Diffstat (limited to 'contrib')
75 files changed, 6267 insertions, 2678 deletions
diff --git a/contrib/python/boto3/py3/.dist-info/METADATA b/contrib/python/boto3/py3/.dist-info/METADATA
index 4696c10b40..b46e114c6b 100644
--- a/contrib/python/boto3/py3/.dist-info/METADATA
+++ b/contrib/python/boto3/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: boto3
-Version: 1.22.10
+Version: 1.22.11
 Summary: The AWS SDK for Python
 Home-page: https://github.com/boto/boto3
 Author: Amazon Web Services
@@ -22,7 +22,7 @@ Classifier: Programming Language :: Python :: 3.10
 Requires-Python: >= 3.6
 License-File: LICENSE
 License-File: NOTICE
-Requires-Dist: botocore (<1.26.0,>=1.25.10)
+Requires-Dist: botocore (<1.26.0,>=1.25.11)
 Requires-Dist: jmespath (<2.0.0,>=0.7.1)
 Requires-Dist: s3transfer (<0.6.0,>=0.5.0)
 Provides-Extra: crt
diff --git a/contrib/python/boto3/py3/boto3/__init__.py b/contrib/python/boto3/py3/boto3/__init__.py
index 10c7a41f5d..3e76503721 100644
--- a/contrib/python/boto3/py3/boto3/__init__.py
+++ b/contrib/python/boto3/py3/boto3/__init__.py
@@ -17,7 +17,7 @@ from boto3.compat import _warn_deprecated_python
 from boto3.session import Session
 
 __author__ = 'Amazon Web Services'
-__version__ = '1.22.10'
+__version__ = '1.22.11'
 
 
 # The default Boto3 session; autoloaded when needed.
diff --git a/contrib/python/botocore/py3/.dist-info/METADATA b/contrib/python/botocore/py3/.dist-info/METADATA
index 437a700acf..511ae48b13 100644
--- a/contrib/python/botocore/py3/.dist-info/METADATA
+++ b/contrib/python/botocore/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: botocore
-Version: 1.25.10
+Version: 1.25.11
 Summary: Low-level, data-driven core of boto 3.
 Home-page: https://github.com/boto/botocore
 Author: Amazon Web Services
diff --git a/contrib/python/botocore/py3/botocore/__init__.py b/contrib/python/botocore/py3/botocore/__init__.py
index 8412efb0ec..53ad3f0f22 100644
--- a/contrib/python/botocore/py3/botocore/__init__.py
+++ b/contrib/python/botocore/py3/botocore/__init__.py
@@ -16,7 +16,7 @@ import logging
 import os
 import re
 
-__version__ = '1.25.10'
+__version__ = '1.25.11'
 
 
 class NullHandler(logging.Handler):
@@ -64,7 +64,7 @@ BOTOCORE_ROOT = os.path.dirname(__file__)
 
 
 # Used to specify anonymous (unsigned) request signature
-class UNSIGNED(object):
+class UNSIGNED:
     def __copy__(self):
         return self
 
@@ -92,7 +92,7 @@ def xform_name(name, sep='_', _xform_cache=_xform_cache):
     is_special = _special_case_transform.search(name)
     matched = is_special.group()
     # Replace something like ARNs, ACLs with _arns, _acls.
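The `- name = ...` / `+ name = ...` pair just below rewrites this suffix transform as an f-string; a quick runnable check, with `DescribeACLs` as an illustrative input, that the two spellings agree:

```python
# Both spellings of xform_name's special-case suffix handling agree;
# botocore's full function also splits CamelCase via regex substitutions.
name, matched, sep = "DescribeACLs", "ACLs", "_"
old_style = name[: -len(matched)] + sep + matched.lower()
new_style = f"{name[: -len(matched)]}{sep}{matched.lower()}"
assert old_style == new_style == "Describe_acls"
```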
- name = name[:-len(matched)] + sep + matched.lower() + name = f"{name[: -len(matched)]}{sep}{matched.lower()}" s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name) transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower() _xform_cache[key] = transformed diff --git a/contrib/python/botocore/py3/botocore/args.py b/contrib/python/botocore/py3/botocore/args.py index 3fa0b931c1..1deebfc954 100644 --- a/contrib/python/botocore/py3/botocore/args.py +++ b/contrib/python/botocore/py3/botocore/args.py @@ -55,9 +55,16 @@ LEGACY_GLOBAL_STS_REGIONS = [ ] -class ClientArgsCreator(object): - def __init__(self, event_emitter, user_agent, response_parser_factory, - loader, exceptions_factory, config_store): +class ClientArgsCreator: + def __init__( + self, + event_emitter, + user_agent, + response_parser_factory, + loader, + exceptions_factory, + config_store, + ): self._event_emitter = event_emitter self._user_agent = user_agent self._response_parser_factory = response_parser_factory @@ -65,14 +72,29 @@ class ClientArgsCreator(object): self._exceptions_factory = exceptions_factory self._config_store = config_store - def get_client_args(self, service_model, region_name, is_secure, - endpoint_url, verify, credentials, scoped_config, - client_config, endpoint_bridge): + def get_client_args( + self, + service_model, + region_name, + is_secure, + endpoint_url, + verify, + credentials, + scoped_config, + client_config, + endpoint_bridge, + ): final_args = self.compute_client_args( - service_model, client_config, endpoint_bridge, region_name, - endpoint_url, is_secure, scoped_config) + service_model, + client_config, + endpoint_bridge, + region_name, + endpoint_url, + is_secure, + scoped_config, + ) - service_name = final_args['service_name'] # noqa + service_name = final_args['service_name'] # noqa parameter_validation = final_args['parameter_validation'] endpoint_config = final_args['endpoint_config'] protocol = final_args['protocol'] @@ -86,10 +108,12 @@ class ClientArgsCreator(object): event_emitter = copy.copy(self._event_emitter) signer = RequestSigner( - service_model.service_id, signing_region, + service_model.service_id, + signing_region, endpoint_config['signing_name'], endpoint_config['signature_version'], - credentials, event_emitter + credentials, + event_emitter, ) config_kwargs['s3'] = s3_config @@ -97,18 +121,22 @@ class ClientArgsCreator(object): endpoint_creator = EndpointCreator(event_emitter) endpoint = endpoint_creator.create_endpoint( - service_model, region_name=endpoint_region_name, - endpoint_url=endpoint_config['endpoint_url'], verify=verify, + service_model, + region_name=endpoint_region_name, + endpoint_url=endpoint_config['endpoint_url'], + verify=verify, response_parser_factory=self._response_parser_factory, max_pool_connections=new_config.max_pool_connections, proxies=new_config.proxies, timeout=(new_config.connect_timeout, new_config.read_timeout), socket_options=socket_options, client_cert=new_config.client_cert, - proxies_config=new_config.proxies_config) + proxies_config=new_config.proxies_config, + ) serializer = botocore.serialize.create_serializer( - protocol, parameter_validation) + protocol, parameter_validation + ) response_parser = botocore.parsers.create_parser(protocol) return { 'serializer': serializer, @@ -120,12 +148,19 @@ class ClientArgsCreator(object): 'loader': self._loader, 'client_config': new_config, 'partition': partition, - 'exceptions_factory': self._exceptions_factory + 'exceptions_factory': self._exceptions_factory, } - def 
compute_client_args(self, service_model, client_config, - endpoint_bridge, region_name, endpoint_url, - is_secure, scoped_config): + def compute_client_args( + self, + service_model, + client_config, + endpoint_bridge, + region_name, + endpoint_url, + is_secure, + scoped_config, + ): service_name = service_model.endpoint_prefix protocol = service_model.metadata['protocol'] parameter_validation = True @@ -160,7 +195,8 @@ class ClientArgsCreator(object): config_kwargs = dict( region_name=endpoint_config['region_name'], signature_version=endpoint_config['signature_version'], - user_agent=user_agent) + user_agent=user_agent, + ) if 'dualstack' in endpoint_variant_tags: config_kwargs.update(use_dualstack_endpoint=True) if 'fips' in endpoint_variant_tags: @@ -195,7 +231,7 @@ class ClientArgsCreator(object): 'protocol': protocol, 'config_kwargs': config_kwargs, 's3_config': s3_config, - 'socket_options': self._compute_socket_options(scoped_config) + 'socket_options': self._compute_socket_options(scoped_config), } def compute_s3_config(self, client_config): @@ -217,8 +253,15 @@ class ClientArgsCreator(object): return s3_configuration - def _compute_endpoint_config(self, service_name, region_name, endpoint_url, - is_secure, endpoint_bridge, s3_config): + def _compute_endpoint_config( + self, + service_name, + region_name, + endpoint_url, + is_secure, + endpoint_bridge, + s3_config, + ): resolve_endpoint_kwargs = { 'service_name': service_name, 'region_name': region_name, @@ -228,20 +271,24 @@ class ClientArgsCreator(object): } if service_name == 's3': return self._compute_s3_endpoint_config( - s3_config=s3_config, **resolve_endpoint_kwargs) + s3_config=s3_config, **resolve_endpoint_kwargs + ) if service_name == 'sts': return self._compute_sts_endpoint_config(**resolve_endpoint_kwargs) return self._resolve_endpoint(**resolve_endpoint_kwargs) - def _compute_s3_endpoint_config(self, s3_config, - **resolve_endpoint_kwargs): + def _compute_s3_endpoint_config( + self, s3_config, **resolve_endpoint_kwargs + ): force_s3_global = self._should_force_s3_global( - resolve_endpoint_kwargs['region_name'], s3_config) + resolve_endpoint_kwargs['region_name'], s3_config + ) if force_s3_global: resolve_endpoint_kwargs['region_name'] = None endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs) self._set_region_if_custom_s3_endpoint( - endpoint_config, resolve_endpoint_kwargs['endpoint_bridge']) + endpoint_config, resolve_endpoint_kwargs['endpoint_bridge'] + ) # For backwards compatibility reasons, we want to make sure the # client.meta.region_name will remain us-east-1 if we forced the # endpoint to be the global region. 
Specifically, if this value @@ -256,24 +303,26 @@ class ClientArgsCreator(object): if s3_config and 'us_east_1_regional_endpoint' in s3_config: s3_regional_config = s3_config['us_east_1_regional_endpoint'] self._validate_s3_regional_config(s3_regional_config) - return ( - s3_regional_config == 'legacy' and - region_name in ['us-east-1', None] - ) + + is_global_region = region_name in ('us-east-1', None) + return s3_regional_config == 'legacy' and is_global_region def _validate_s3_regional_config(self, config_val): if config_val not in VALID_REGIONAL_ENDPOINTS_CONFIG: - raise botocore.exceptions.\ - InvalidS3UsEast1RegionalEndpointConfigError( - s3_us_east_1_regional_endpoint_config=config_val) + raise botocore.exceptions.InvalidS3UsEast1RegionalEndpointConfigError( + s3_us_east_1_regional_endpoint_config=config_val + ) - def _set_region_if_custom_s3_endpoint(self, endpoint_config, - endpoint_bridge): + def _set_region_if_custom_s3_endpoint( + self, endpoint_config, endpoint_bridge + ): # If a user is providing a custom URL, the endpoint resolver will # refuse to infer a signing region. If we want to default to s3v4, # we have to account for this. - if endpoint_config['signing_region'] is None \ - and endpoint_config['region_name'] is None: + if ( + endpoint_config['signing_region'] is None + and endpoint_config['region_name'] is None + ): endpoint = endpoint_bridge.resolve('s3') endpoint_config['signing_region'] = endpoint['signing_region'] endpoint_config['region_name'] = endpoint['region_name'] @@ -283,31 +332,37 @@ class ClientArgsCreator(object): if self._should_set_global_sts_endpoint( resolve_endpoint_kwargs['region_name'], resolve_endpoint_kwargs['endpoint_url'], - endpoint_config + endpoint_config, ): self._set_global_sts_endpoint( - endpoint_config, resolve_endpoint_kwargs['is_secure']) + endpoint_config, resolve_endpoint_kwargs['is_secure'] + ) return endpoint_config - def _should_set_global_sts_endpoint(self, region_name, endpoint_url, - endpoint_config): + def _should_set_global_sts_endpoint( + self, region_name, endpoint_url, endpoint_config + ): endpoint_variant_tags = endpoint_config['metadata'].get('tags') if endpoint_url or endpoint_variant_tags: return False return ( - self._get_sts_regional_endpoints_config() == 'legacy' and - region_name in LEGACY_GLOBAL_STS_REGIONS + self._get_sts_regional_endpoints_config() == 'legacy' + and region_name in LEGACY_GLOBAL_STS_REGIONS ) def _get_sts_regional_endpoints_config(self): sts_regional_endpoints_config = self._config_store.get_config_variable( - 'sts_regional_endpoints') + 'sts_regional_endpoints' + ) if not sts_regional_endpoints_config: sts_regional_endpoints_config = 'legacy' - if sts_regional_endpoints_config not in \ - VALID_REGIONAL_ENDPOINTS_CONFIG: + if ( + sts_regional_endpoints_config + not in VALID_REGIONAL_ENDPOINTS_CONFIG + ): raise botocore.exceptions.InvalidSTSRegionalEndpointsConfigError( - sts_regional_endpoints_config=sts_regional_endpoints_config) + sts_regional_endpoints_config=sts_regional_endpoints_config + ) return sts_regional_endpoints_config def _set_global_sts_endpoint(self, endpoint_config, is_secure): @@ -315,10 +370,17 @@ class ClientArgsCreator(object): endpoint_config['endpoint_url'] = '%s://sts.amazonaws.com' % scheme endpoint_config['signing_region'] = 'us-east-1' - def _resolve_endpoint(self, service_name, region_name, - endpoint_url, is_secure, endpoint_bridge): + def _resolve_endpoint( + self, + service_name, + region_name, + endpoint_url, + is_secure, + endpoint_bridge, + ): return 
endpoint_bridge.resolve( - service_name, region_name, endpoint_url, is_secure) + service_name, region_name, endpoint_url, is_secure + ) def _compute_socket_options(self, scoped_config): # This disables Nagle's algorithm and is the default socket options @@ -328,7 +390,8 @@ class ClientArgsCreator(object): # Enables TCP Keepalive if specified in shared config file. if self._ensure_boolean(scoped_config.get('tcp_keepalive', False)): socket_options.append( - (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)) + (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + ) return socket_options def _compute_retry_config(self, config_kwargs): @@ -389,7 +452,8 @@ class ClientArgsCreator(object): if connect_timeout is not None: return connect_timeout = self._config_store.get_config_variable( - 'connect_timeout') + 'connect_timeout' + ) if connect_timeout: config_kwargs['connect_timeout'] = connect_timeout diff --git a/contrib/python/botocore/py3/botocore/auth.py b/contrib/python/botocore/py3/botocore/auth.py index 3ec6219d41..a0ca43159f 100644 --- a/contrib/python/botocore/py3/botocore/auth.py +++ b/contrib/python/botocore/py3/botocore/auth.py @@ -16,6 +16,7 @@ import calendar import datetime import functools import hmac +import json import logging import time from email.utils import formatdate @@ -27,10 +28,8 @@ from botocore.compat import ( HTTPHeaders, encodebytes, ensure_unicode, - json, parse_qs, quote, - six, unquote, urlsplit, urlunsplit, @@ -43,14 +42,15 @@ from botocore.utils import ( ) # Imports for backwards compatibility -from botocore.compat import MD5_AVAILABLE # noqa +from botocore.compat import MD5_AVAILABLE # noqa logger = logging.getLogger(__name__) EMPTY_SHA256_HASH = ( - 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855') + 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' +) # This is the buffer size used when calculating sha256 checksums. # Experimenting with various buffer sizes showed that this value generally # gave the best result (in terms of performance). @@ -74,10 +74,10 @@ def _host_from_url(url): url_parts = urlsplit(url) host = url_parts.hostname # urlsplit's hostname is always lowercase if is_valid_ipv6_endpoint_url(url): - host = '[%s]' % (host) + host = f'[{host}]' default_ports = { 'http': 80, - 'https': 443 + 'https': 443, } if url_parts.port is not None: if url_parts.port != default_ports.get(url_parts.scheme): @@ -91,14 +91,14 @@ def _get_body_as_dict(request): # string or bytes. In those cases we attempt to load the data as a # dict. data = request.data - if isinstance(data, six.binary_type): + if isinstance(data, bytes): data = json.loads(data.decode('utf-8')) - elif isinstance(data, six.string_types): + elif isinstance(data, str): data = json.loads(data) return data -class BaseSigner(object): +class BaseSigner: REQUIRES_REGION = False def add_auth(self, request): @@ -119,11 +119,10 @@ class SigV2Auth(BaseSigner): path = split.path if len(path) == 0: path = '/' - string_to_sign = '%s\n%s\n%s\n' % (request.method, - split.netloc, - path) - lhmac = hmac.new(self.credentials.secret_key.encode('utf-8'), - digestmod=sha256) + string_to_sign = f"{request.method}\n{split.netloc}\n{path}\n" + lhmac = hmac.new( + self.credentials.secret_key.encode("utf-8"), digestmod=sha256 + ) pairs = [] for key in sorted(params): # Any previous signature should not be a part of this @@ -131,7 +130,7 @@ class SigV2Auth(BaseSigner): # issues during retries. 
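The SigV2 hunk above now builds the string to sign with an f-string and feeds it to HMAC-SHA256. A self-contained sketch of that signing step with illustrative inputs; the real signer also appends the sorted, percent-encoded query pairs assembled just below:

```python
import hmac
from base64 import b64encode
from hashlib import sha256

def sigv2_signature(secret_key, method, netloc, path, canonical_qs):
    # f-string form of the string-to-sign from the hunk above, plus the
    # canonical query string the signer appends afterwards.
    string_to_sign = f"{method}\n{netloc}\n{path}\n" + canonical_qs
    mac = hmac.new(secret_key.encode("utf-8"), digestmod=sha256)
    mac.update(string_to_sign.encode("utf-8"))
    return b64encode(mac.digest()).decode("utf-8")

print(sigv2_signature("secret", "GET", "ec2.amazonaws.com", "/",
                      "Action=DescribeInstances"))
```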
if key == 'Signature': continue - value = six.text_type(params[key]) + value = str(params[key]) quoted_key = quote(key.encode('utf-8'), safe='') quoted_value = quote(value.encode('utf-8'), safe='-_~') pairs.append(f'{quoted_key}={quoted_value}') @@ -181,13 +180,15 @@ class SigV3Auth(BaseSigner): if 'X-Amz-Security-Token' in request.headers: del request.headers['X-Amz-Security-Token'] request.headers['X-Amz-Security-Token'] = self.credentials.token - new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'), - digestmod=sha256) + new_hmac = hmac.new( + self.credentials.secret_key.encode('utf-8'), digestmod=sha256 + ) new_hmac.update(request.headers['Date'].encode('utf-8')) encoded_signature = encodebytes(new_hmac.digest()).strip() - signature = ('AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s' % - (self.credentials.access_key, 'HmacSHA256', - encoded_signature.decode('utf-8'))) + signature = ( + f"AWS3-HTTPS AWSAccessKeyId={self.credentials.access_key}," + f"Algorithm=HmacSHA256,Signature={encoded_signature.decode('utf-8')}" + ) if 'X-Amzn-Authorization' in request.headers: del request.headers['X-Amzn-Authorization'] request.headers['X-Amzn-Authorization'] = signature @@ -197,6 +198,7 @@ class SigV4Auth(BaseSigner): """ Sign a request with Signature V4. """ + REQUIRES_REGION = True def __init__(self, credentials, service_name, region_name): @@ -245,13 +247,14 @@ class SigV4Auth(BaseSigner): key_val_pairs = [] for key in params: value = str(params[key]) - key_val_pairs.append((quote(key, safe='-_.~'), - quote(value, safe='-_.~'))) + key_val_pairs.append( + (quote(key, safe='-_.~'), quote(value, safe='-_.~')) + ) sorted_key_vals = [] # Sort by the URI-encoded key names, and in the case of # repeated keys, then sort by the value. for key, value in sorted(key_val_pairs): - sorted_key_vals.append('%s=%s' % (key, value)) + sorted_key_vals.append(f'{key}={value}') canonical_query_string = '&'.join(sorted_key_vals) return canonical_query_string @@ -267,7 +270,7 @@ class SigV4Auth(BaseSigner): # Sort by the URI-encoded key names, and in the case of # repeated keys, then sort by the value. 
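`_canonical_query_string_params` above percent-encodes keys and values with SigV4's unreserved character set and sorts the encoded pairs (the final join completes just below). A minimal standalone version, assuming a plain dict of parameters:

```python
from urllib.parse import quote

def canonical_query_string(params):
    # Encode with SigV4's safe set, then sort by encoded key (and by
    # value for repeated keys, which botocore models as pairs).
    pairs = sorted(
        (quote(str(k), safe="-_.~"), quote(str(v), safe="-_.~"))
        for k, v in params.items()
    )
    return "&".join(f"{k}={v}" for k, v in pairs)

assert canonical_query_string({"b": "2", "a": "1 +"}) == "a=1%20%2B&b=2"
```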
for key, value in sorted(key_val_pairs): - sorted_key_vals.append('%s=%s' % (key, value)) + sorted_key_vals.append(f'{key}={value}') canonical_query_string = '&'.join(sorted_key_vals) return canonical_query_string @@ -281,9 +284,10 @@ class SigV4Auth(BaseSigner): headers = [] sorted_header_names = sorted(set(headers_to_sign)) for key in sorted_header_names: - value = ','.join(self._header_value(v) for v in - headers_to_sign.get_all(key)) - headers.append('%s:%s' % (key, ensure_unicode(value))) + value = ','.join( + self._header_value(v) for v in headers_to_sign.get_all(key) + ) + headers.append(f'{key}:{ensure_unicode(value)}') return '\n'.join(headers) def _header_value(self, value): @@ -295,9 +299,7 @@ class SigV4Auth(BaseSigner): return ' '.join(value.split()) def signed_headers(self, headers_to_sign): - headers = sorted( - [n.lower().strip() for n in set(headers_to_sign)] - ) + headers = sorted(n.lower().strip() for n in set(headers_to_sign)) return ';'.join(headers) def _is_streaming_checksum_payload(self, request): @@ -315,8 +317,9 @@ class SigV4Auth(BaseSigner): request_body = request.body if request_body and hasattr(request_body, 'seek'): position = request_body.tell() - read_chunksize = functools.partial(request_body.read, - PAYLOAD_BUFFER) + read_chunksize = functools.partial( + request_body.read, PAYLOAD_BUFFER + ) checksum = sha256() for chunk in iter(read_chunksize, b''): checksum.update(chunk) @@ -389,8 +392,9 @@ class SigV4Auth(BaseSigner): def signature(self, string_to_sign, request): key = self.credentials.secret_key - k_date = self._sign(('AWS4' + key).encode('utf-8'), - request.context['timestamp'][0:8]) + k_date = self._sign( + (f"AWS4{key}").encode(), request.context["timestamp"][0:8] + ) k_region = self._sign(k_date, self._region_name) k_service = self._sign(k_region, self._service_name) k_signing = self._sign(k_service, 'aws4_request') @@ -417,7 +421,9 @@ class SigV4Auth(BaseSigner): def _inject_signature_to_request(self, request, signature): auth_str = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)] headers_to_sign = self.headers_to_sign(request) - auth_str.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign)) + auth_str.append( + f"SignedHeaders={self.signed_headers(headers_to_sign)}" + ) auth_str.append('Signature=%s' % signature) request.headers['Authorization'] = ', '.join(auth_str) return request @@ -443,9 +449,11 @@ class SigV4Auth(BaseSigner): if 'Date' in request.headers: del request.headers['Date'] datetime_timestamp = datetime.datetime.strptime( - request.context['timestamp'], SIGV4_TIMESTAMP) + request.context['timestamp'], SIGV4_TIMESTAMP + ) request.headers['Date'] = formatdate( - int(calendar.timegm(datetime_timestamp.timetuple()))) + int(calendar.timegm(datetime_timestamp.timetuple())) + ) if 'X-Amz-Date' in request.headers: del request.headers['X-Amz-Date'] else: @@ -456,7 +464,7 @@ class SigV4Auth(BaseSigner): class S3SigV4Auth(SigV4Auth): def _modify_request_before_signing(self, request): - super(S3SigV4Auth, self)._modify_request_before_signing(request) + super()._modify_request_before_signing(request) if 'X-Amz-Content-SHA256' in request.headers: del request.headers['X-Amz-Content-SHA256'] @@ -489,8 +497,10 @@ class S3SigV4Auth(SigV4Auth): algorithm = checksum_context.get('request_algorithm') if isinstance(algorithm, dict) and algorithm.get('in') == 'header': checksum_header = algorithm['name'] - if not request.url.startswith('https') or \ - checksum_header not in request.headers: + if ( + not request.url.startswith("https") 
+ or checksum_header not in request.headers + ): return True # If the input is streaming we disable body signing by default. @@ -499,7 +509,7 @@ class S3SigV4Auth(SigV4Auth): # If the S3-specific checks had no results, delegate to the generic # checks. - return super(S3SigV4Auth, self)._should_sha256_sign_payload(request) + return super()._should_sha256_sign_payload(request) def _normalize_url_path(self, path): # For S3, we do not normalize the path. @@ -509,10 +519,10 @@ class S3SigV4Auth(SigV4Auth): class SigV4QueryAuth(SigV4Auth): DEFAULT_EXPIRES = 3600 - def __init__(self, credentials, service_name, region_name, - expires=DEFAULT_EXPIRES): - super(SigV4QueryAuth, self).__init__(credentials, service_name, - region_name) + def __init__( + self, credentials, service_name, region_name, expires=DEFAULT_EXPIRES + ): + super().__init__(credentials, service_name, region_name) self._expires = expires def _modify_request_before_signing(self, request): @@ -545,9 +555,9 @@ class SigV4QueryAuth(SigV4Auth): # parse_qs makes each value a list, but in our case we know we won't # have repeated keys so we know we have single element lists which we # can convert back to scalar values. - query_dict = dict( - [(k, v[0]) for k, v in - parse_qs(url_parts.query, keep_blank_values=True).items()]) + query_string_parts = parse_qs(url_parts.query, keep_blank_values=True) + query_dict = {k: v[0] for k, v in query_string_parts.items()} + if request.params: query_dict.update(request.params) request.params = {} @@ -565,8 +575,9 @@ class SigV4QueryAuth(SigV4Auth): request.data = '' if query_dict: operation_params = percent_encode_sequence(query_dict) + '&' - new_query_string = (operation_params + - percent_encode_sequence(auth_params)) + new_query_string = ( + f"{operation_params}{percent_encode_sequence(auth_params)}" + ) # url_parts is a tuple (and therefore immutable) so we need to create # a new url_parts with the new query string. # <part> - <index> @@ -597,6 +608,7 @@ class S3SigV4QueryAuth(SigV4QueryAuth): http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html """ + def _normalize_url_path(self, path): # For S3, we do not normalize the path. return path @@ -616,6 +628,7 @@ class S3SigV4PostAuth(SigV4Auth): Implementation doc here: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html """ + def add_auth(self, request): datetime_now = datetime.datetime.utcnow() request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP) @@ -647,7 +660,8 @@ class S3SigV4PostAuth(SigV4Auth): # Dump the base64 encoded policy into the fields dictionary. 
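The `signature` method a little further up derives the SigV4 signing key through a chain of HMACs seeded with `AWS4` plus the secret key. The same derivation as a standalone sketch (date, region, and service values are illustrative):

```python
import hmac
from hashlib import sha256

def _sign(key: bytes, msg: str) -> bytes:
    return hmac.new(key, msg.encode("utf-8"), sha256).digest()

def sigv4_signing_key(secret_key, date_stamp, region, service):
    # Mirrors the k_date -> k_region -> k_service -> k_signing chain.
    k_date = _sign(f"AWS4{secret_key}".encode(), date_stamp)
    k_region = _sign(k_date, region)
    k_service = _sign(k_region, service)
    return _sign(k_service, "aws4_request")

print(sigv4_signing_key("secret", "20220525", "us-east-1", "s3").hex())
```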
fields['policy'] = base64.b64encode( - json.dumps(policy).encode('utf-8')).decode('utf-8') + json.dumps(policy).encode('utf-8') + ).decode('utf-8') fields['x-amz-signature'] = self.signature(fields['policy'], request) @@ -658,24 +672,52 @@ class S3SigV4PostAuth(SigV4Auth): class HmacV1Auth(BaseSigner): # List of Query String Arguments of Interest - QSAOfInterest = ['accelerate', 'acl', 'cors', 'defaultObjectAcl', - 'location', 'logging', 'partNumber', 'policy', - 'requestPayment', 'torrent', - 'versioning', 'versionId', 'versions', 'website', - 'uploads', 'uploadId', 'response-content-type', - 'response-content-language', 'response-expires', - 'response-cache-control', 'response-content-disposition', - 'response-content-encoding', 'delete', 'lifecycle', - 'tagging', 'restore', 'storageClass', 'notification', - 'replication', 'requestPayment', 'analytics', 'metrics', - 'inventory', 'select', 'select-type', 'object-lock'] + QSAOfInterest = [ + 'accelerate', + 'acl', + 'cors', + 'defaultObjectAcl', + 'location', + 'logging', + 'partNumber', + 'policy', + 'requestPayment', + 'torrent', + 'versioning', + 'versionId', + 'versions', + 'website', + 'uploads', + 'uploadId', + 'response-content-type', + 'response-content-language', + 'response-expires', + 'response-cache-control', + 'response-content-disposition', + 'response-content-encoding', + 'delete', + 'lifecycle', + 'tagging', + 'restore', + 'storageClass', + 'notification', + 'replication', + 'requestPayment', + 'analytics', + 'metrics', + 'inventory', + 'select', + 'select-type', + 'object-lock', + ] def __init__(self, credentials, service_name=None, region_name=None): self.credentials = credentials def sign_string(self, string_to_sign): - new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'), - digestmod=sha1) + new_hmac = hmac.new( + self.credentials.secret_key.encode('utf-8'), digestmod=sha1 + ) new_hmac.update(string_to_sign.encode('utf-8')) return encodebytes(new_hmac.digest()).strip().decode('utf-8') @@ -703,11 +745,12 @@ class HmacV1Auth(BaseSigner): lk = key.lower() if headers[key] is not None: if lk.startswith('x-amz-'): - custom_headers[lk] = ','.join(v.strip() for v in - headers.get_all(key)) + custom_headers[lk] = ','.join( + v.strip() for v in headers.get_all(key) + ) sorted_header_keys = sorted(custom_headers.keys()) for key in sorted_header_keys: - hoi.append("%s:%s" % (key, custom_headers[key])) + hoi.append(f"{key}:{custom_headers[key]}") return '\n'.join(hoi) def unquote_v(self, nv): @@ -735,8 +778,9 @@ class HmacV1Auth(BaseSigner): if split.query: qsa = split.query.split('&') qsa = [a.split('=', 1) for a in qsa] - qsa = [self.unquote_v(a) for a in qsa - if a[0] in self.QSAOfInterest] + qsa = [ + self.unquote_v(a) for a in qsa if a[0] in self.QSAOfInterest + ] if len(qsa) > 0: qsa.sort(key=itemgetter(0)) qsa = ['='.join(a) for a in qsa] @@ -744,8 +788,9 @@ class HmacV1Auth(BaseSigner): buf += '&'.join(qsa) return buf - def canonical_string(self, method, split, headers, expires=None, - auth_path=None): + def canonical_string( + self, method, split, headers, expires=None, auth_path=None + ): cs = method.upper() + '\n' cs += self.canonical_standard_headers(headers) + '\n' custom_headers = self.canonical_custom_headers(headers) @@ -754,15 +799,15 @@ class HmacV1Auth(BaseSigner): cs += self.canonical_resource(split, auth_path=auth_path) return cs - def get_signature(self, method, split, headers, expires=None, - auth_path=None): + def get_signature( + self, method, split, headers, expires=None, auth_path=None + ): if 
self.credentials.token: del headers['x-amz-security-token'] headers['x-amz-security-token'] = self.credentials.token - string_to_sign = self.canonical_string(method, - split, - headers, - auth_path=auth_path) + string_to_sign = self.canonical_string( + method, split, headers, auth_path=auth_path + ) logger.debug('StringToSign:\n%s', string_to_sign) return self.sign_string(string_to_sign) @@ -772,9 +817,9 @@ class HmacV1Auth(BaseSigner): logger.debug("Calculating signature using hmacv1 auth.") split = urlsplit(request.url) logger.debug('HTTP request method: %s', request.method) - signature = self.get_signature(request.method, split, - request.headers, - auth_path=request.auth_path) + signature = self.get_signature( + request.method, split, request.headers, auth_path=request.auth_path + ) self._inject_signature(request, signature) def _get_date(self): @@ -789,8 +834,9 @@ class HmacV1Auth(BaseSigner): # headers['foo'] = 'a'; headers['foo'] = 'b' # list(headers) will print ['foo', 'foo']. del request.headers['Authorization'] - request.headers['Authorization'] = ( - "AWS %s:%s" % (self.credentials.access_key, signature)) + + auth_header = f"AWS {self.credentials.access_key}:{signature}" + request.headers['Authorization'] = auth_header class HmacV1QueryAuth(HmacV1Auth): @@ -803,6 +849,7 @@ class HmacV1QueryAuth(HmacV1Auth): #RESTAuthenticationQueryStringAuth """ + DEFAULT_EXPIRES = 3600 def __init__(self, credentials, expires=DEFAULT_EXPIRES): @@ -826,8 +873,10 @@ class HmacV1QueryAuth(HmacV1Auth): # We only want to include relevant headers in the query string. # These can be anything that starts with x-amz, is Content-MD5, # or is Content-Type. - elif lk.startswith('x-amz-') or lk in ['content-md5', - 'content-type']: + elif lk.startswith('x-amz-') or lk in ( + 'content-md5', + 'content-type', + ): query_dict[lk] = request.headers[lk] # Combine all of the identified headers into an encoded # query string @@ -838,7 +887,7 @@ class HmacV1QueryAuth(HmacV1Auth): if p[3]: # If there was a pre-existing query string, we should # add that back before injecting the new query string. - new_query_string = '%s&%s' % (p[3], new_query_string) + new_query_string = f'{p[3]}&{new_query_string}' new_url_parts = (p[0], p[1], p[2], new_query_string, p[4]) request.url = urlunsplit(new_url_parts) @@ -851,6 +900,7 @@ class HmacV1PostAuth(HmacV1Auth): http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingHTTPPOST.html """ + def add_auth(self, request): fields = {} if request.context.get('s3-presign-post-fields', None) is not None: @@ -873,7 +923,8 @@ class HmacV1PostAuth(HmacV1Auth): # Dump the base64 encoded policy into the fields dictionary. 
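Both POST signers (S3SigV4PostAuth above and HmacV1PostAuth here) serialize the policy document the same way before signing it; the assignment completes just below. As a standalone sketch:

```python
import base64
import json

def encode_policy(policy: dict) -> str:
    # JSON-serialize, then base64-encode to the ASCII form that is signed.
    return base64.b64encode(json.dumps(policy).encode("utf-8")).decode("utf-8")

print(encode_policy({"expiration": "2022-05-25T12:00:00Z", "conditions": []}))
```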
fields['policy'] = base64.b64encode( - json.dumps(policy).encode('utf-8')).decode('utf-8') + json.dumps(policy).encode('utf-8') + ).decode('utf-8') fields['signature'] = self.sign_string(fields['policy']) @@ -894,11 +945,14 @@ AUTH_TYPE_MAPS = { # Define v4 signers depending on if CRT is present if HAS_CRT: from botocore.crt.auth import CRT_AUTH_TYPE_MAPS + AUTH_TYPE_MAPS.update(CRT_AUTH_TYPE_MAPS) else: - AUTH_TYPE_MAPS.update({ - 'v4': SigV4Auth, - 'v4-query': SigV4QueryAuth, - 's3v4': S3SigV4Auth, - 's3v4-query': S3SigV4QueryAuth, - }) + AUTH_TYPE_MAPS.update( + { + 'v4': SigV4Auth, + 'v4-query': SigV4QueryAuth, + 's3v4': S3SigV4Auth, + 's3v4-query': S3SigV4QueryAuth, + } + ) diff --git a/contrib/python/botocore/py3/botocore/awsrequest.py b/contrib/python/botocore/py3/botocore/awsrequest.py index 92ad0b7a2b..e9696a0b1c 100644 --- a/contrib/python/botocore/py3/botocore/awsrequest.py +++ b/contrib/python/botocore/py3/botocore/awsrequest.py @@ -23,7 +23,6 @@ from botocore.compat import ( HTTPHeaders, HTTPResponse, MutableMapping, - six, urlencode, urlparse, urlsplit, @@ -50,7 +49,7 @@ class AWSHTTPResponse(HTTPResponse): return HTTPResponse._read_status(self) -class AWSConnection(object): +class AWSConnection: """Mixin for HTTPConnection that supports Expect 100-continue. This when mixed with a subclass of httplib.HTTPConnection (though @@ -62,8 +61,9 @@ class AWSConnection(object): this against AWS services. """ + def __init__(self, *args, **kwargs): - super(AWSConnection, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self._original_response_cls = self.response_class # We'd ideally hook into httplib's states, but they're all # __mangled_vars so we use our own state var. This variable is set @@ -77,7 +77,7 @@ class AWSConnection(object): self._expect_header_set = False def close(self): - super(AWSConnection, self).close() + super().close() # Reset all of our instance state we were tracking. self._response_received = False self._expect_header_set = False @@ -90,8 +90,9 @@ class AWSConnection(object): else: self._expect_header_set = False self.response_class = self._original_response_cls - rval = super(AWSConnection, self)._send_request( - method, url, body, headers, *args, **kwargs) + rval = super()._send_request( + method, url, body, headers, *args, **kwargs + ) self._expect_header_set = False return rval @@ -101,7 +102,7 @@ class AWSConnection(object): # Any six.text_types will be encoded as utf-8. bytes_buffer = [] for chunk in mixed_buffer: - if isinstance(chunk, six.text_type): + if isinstance(chunk, str): bytes_buffer.append(chunk.encode('utf-8')) else: bytes_buffer.append(chunk) @@ -137,8 +138,10 @@ class AWSConnection(object): # server (possibly via a proxy) from which it has never seen a # 100 (Continue) status, the client SHOULD NOT wait for an # indefinite period before sending the request body. - logger.debug("No response seen from server, continuing to " - "send the response body.") + logger.debug( + "No response seen from server, continuing to " + "send the response body." + ) if message_body is not None: # message_body was not a string (i.e. it is a file), and # we must run the risk of Nagle. @@ -166,8 +169,9 @@ class AWSConnection(object): parts = maybe_status_line.split(None, 2) if self._is_100_continue_status(maybe_status_line): self._consume_headers(fp) - logger.debug("100 Continue response seen, " - "now sending request body.") + logger.debug( + "100 Continue response seen, now sending request body." 
+ ) self._send_message_body(message_body) elif len(parts) == 3 and parts[0].startswith(b'HTTP/'): # From the RFC: @@ -182,12 +186,18 @@ class AWSConnection(object): # So if we don't get a 100 Continue response, then # whatever the server has sent back is the final response # and don't send the message_body. - logger.debug("Received a non 100 Continue response " - "from the server, NOT sending request body.") - status_tuple = (parts[0].decode('ascii'), - int(parts[1]), parts[2].decode('ascii')) + logger.debug( + "Received a non 100 Continue response " + "from the server, NOT sending request body." + ) + status_tuple = ( + parts[0].decode('ascii'), + int(parts[1]), + parts[2].decode('ascii'), + ) response_class = functools.partial( - AWSHTTPResponse, status_tuple=status_tuple) + AWSHTTPResponse, status_tuple=status_tuple + ) self.response_class = response_class self._response_received = True finally: @@ -199,10 +209,12 @@ class AWSConnection(object): def send(self, str): if self._response_received: - logger.debug("send() called, but reseponse already received. " - "Not sending data.") + logger.debug( + "send() called, but reseponse already received. " + "Not sending data." + ) return - return super(AWSConnection, self).send(str) + return super().send(str) def _is_100_continue_status(self, maybe_status_line): parts = maybe_status_line.split(None, 2) @@ -215,11 +227,11 @@ class AWSConnection(object): class AWSHTTPConnection(AWSConnection, HTTPConnection): - """ An HTTPConnection that supports 100 Continue behavior. """ + """An HTTPConnection that supports 100 Continue behavior.""" class AWSHTTPSConnection(AWSConnection, VerifiedHTTPSConnection): - """ An HTTPSConnection that supports 100 Continue behavior. """ + """An HTTPSConnection that supports 100 Continue behavior.""" class AWSHTTPConnectionPool(HTTPConnectionPool): @@ -230,8 +242,9 @@ class AWSHTTPSConnectionPool(HTTPSConnectionPool): ConnectionCls = AWSHTTPSConnection -def prepare_request_dict(request_dict, endpoint_url, context=None, - user_agent=None): +def prepare_request_dict( + request_dict, endpoint_url, context=None, user_agent=None +): """ This method prepares a request dict to be created into an AWSRequestObject. This prepares the request dict by adding the @@ -285,7 +298,8 @@ def create_request_object(request_dict): """ r = request_dict request_object = AWSRequest( - method=r['method'], url=r['url'], data=r['body'], headers=r['headers']) + method=r['method'], url=r['url'], data=r['body'], headers=r['headers'] + ) request_object.context = r['context'] return request_object @@ -318,7 +332,7 @@ def _urljoin(endpoint_url, url_path, host_prefix): return reconstructed -class AWSRequestPreparer(object): +class AWSRequestPreparer: """ This class performs preparation on AWSRequest objects similar to that of the PreparedRequest class does in the requests library. However, the logic @@ -338,6 +352,7 @@ class AWSRequestPreparer(object): This class does not prepare the method, auth or cookies. 
""" + def prepare(self, original): method = original.method url = self._prepare_url(original) @@ -379,9 +394,9 @@ class AWSRequestPreparer(object): def _to_utf8(self, item): key, value = item - if isinstance(key, six.text_type): + if isinstance(key, str): key = key.encode('utf-8') - if isinstance(value, six.text_type): + if isinstance(value, str): value = value.encode('utf-8') return key, value @@ -401,7 +416,7 @@ class AWSRequestPreparer(object): return botocore.utils.determine_content_length(body) -class AWSRequest(object): +class AWSRequest: """Represents the elements of an HTTP request. This class was originally inspired by requests.models.Request, but has been @@ -411,14 +426,16 @@ class AWSRequest(object): _REQUEST_PREPARER_CLS = AWSRequestPreparer - def __init__(self, - method=None, - url=None, - headers=None, - data=None, - params=None, - auth_path=None, - stream_output=False): + def __init__( + self, + method=None, + url=None, + headers=None, + data=None, + params=None, + auth_path=None, + stream_output=False, + ): self._request_preparer = self._REQUEST_PREPARER_CLS() @@ -453,12 +470,12 @@ class AWSRequest(object): @property def body(self): body = self.prepare().body - if isinstance(body, six.text_type): + if isinstance(body, str): body = body.encode('utf-8') return body -class AWSPreparedRequest(object): +class AWSPreparedRequest: """A data class representing a finalized request to be sent over the wire. Requests at this stage should be treated as final, and the properties of @@ -470,6 +487,7 @@ class AWSPreparedRequest(object): :ivar body: The HTTP body. :ivar stream_output: If the response for this request should be streamed. """ + def __init__(self, method, url, headers, body, stream_output): self.method = method self.url = url @@ -497,7 +515,7 @@ class AWSPreparedRequest(object): # the entire body contents again if we need to). # Same case if the body is a string/bytes/bytearray type. - non_seekable_types = (six.binary_type, six.text_type, bytearray) + non_seekable_types = (bytes, str, bytearray) if self.body is None or isinstance(self.body, non_seekable_types): return try: @@ -508,7 +526,7 @@ class AWSPreparedRequest(object): raise UnseekableStreamError(stream_object=self.body) -class AWSResponse(object): +class AWSResponse: """A data class representing an HTTP response. This class was originally inspired by requests.models.Response, but has @@ -557,7 +575,7 @@ class AWSResponse(object): return self.content.decode('utf-8') -class _HeaderKey(object): +class _HeaderKey: def __init__(self, key): self._key = key self._lower = key.lower() @@ -576,7 +594,8 @@ class _HeaderKey(object): class HeadersDict(MutableMapping): - """A case-insenseitive dictionary to represent HTTP headers. """ + """A case-insenseitive dictionary to represent HTTP headers.""" + def __init__(self, *args, **kwargs): self._dict = {} self.update(*args, **kwargs) diff --git a/contrib/python/botocore/py3/botocore/client.py b/contrib/python/botocore/py3/botocore/client.py index 469df807e8..bbcb35a8ce 100644 --- a/contrib/python/botocore/py3/botocore/client.py +++ b/contrib/python/botocore/py3/botocore/client.py @@ -53,22 +53,31 @@ from botocore.utils import ( # "from botocore.client import Config" # "from botocore.client import ClientError" # etc. 
-from botocore.config import Config # noqa -from botocore.exceptions import ClientError # noqa -from botocore.args import ClientArgsCreator # noqa -from botocore import UNSIGNED # noqa +from botocore.config import Config # noqa +from botocore.exceptions import ClientError # noqa +from botocore.args import ClientArgsCreator # noqa +from botocore import UNSIGNED # noqa logger = logging.getLogger(__name__) history_recorder = get_global_history_recorder() -class ClientCreator(object): +class ClientCreator: """Creates client objects for a service.""" - def __init__(self, loader, endpoint_resolver, user_agent, event_emitter, - retry_handler_factory, retry_config_translator, - response_parser_factory=None, exceptions_factory=None, - config_store=None): + + def __init__( + self, + loader, + endpoint_resolver, + user_agent, + event_emitter, + retry_handler_factory, + retry_config_translator, + response_parser_factory=None, + exceptions_factory=None, + config_store=None, + ): self._loader = loader self._endpoint_resolver = endpoint_resolver self._user_agent = user_agent @@ -83,35 +92,64 @@ class ClientCreator(object): # future). self._config_store = config_store - def create_client(self, service_name, region_name, is_secure=True, - endpoint_url=None, verify=None, - credentials=None, scoped_config=None, - api_version=None, - client_config=None): + def create_client( + self, + service_name, + region_name, + is_secure=True, + endpoint_url=None, + verify=None, + credentials=None, + scoped_config=None, + api_version=None, + client_config=None, + ): responses = self._event_emitter.emit( - 'choose-service-name', service_name=service_name) + 'choose-service-name', service_name=service_name + ) service_name = first_non_none_response(responses, default=service_name) service_model = self._load_service_model(service_name, api_version) cls = self._create_client_class(service_name, service_model) region_name, client_config = self._normalize_fips_region( - region_name, client_config) + region_name, client_config + ) endpoint_bridge = ClientEndpointBridge( - self._endpoint_resolver, scoped_config, client_config, + self._endpoint_resolver, + scoped_config, + client_config, service_signing_name=service_model.metadata.get('signingName'), - config_store=self._config_store) + config_store=self._config_store, + ) client_args = self._get_client_args( - service_model, region_name, is_secure, endpoint_url, - verify, credentials, scoped_config, client_config, endpoint_bridge) + service_model, + region_name, + is_secure, + endpoint_url, + verify, + credentials, + scoped_config, + client_config, + endpoint_bridge, + ) service_client = cls(**client_args) self._register_retries(service_client) self._register_eventbridge_events( - service_client, endpoint_bridge, endpoint_url) + service_client, endpoint_bridge, endpoint_url + ) self._register_s3_events( - service_client, endpoint_bridge, endpoint_url, client_config, - scoped_config) + service_client, + endpoint_bridge, + endpoint_url, + client_config, + scoped_config, + ) self._register_s3_control_events( - service_client, endpoint_bridge, endpoint_url, client_config, - scoped_config) + service_client, + endpoint_bridge, + endpoint_url, + client_config, + scoped_config, + ) self._register_endpoint_discovery( service_client, endpoint_url, client_config ) @@ -130,38 +168,40 @@ class ClientCreator(object): self._event_emitter.emit( 'creating-client-class.%s' % service_id, class_attributes=class_attributes, - base_classes=bases) + base_classes=bases, + ) class_name = 
get_service_module_name(service_model) cls = type(str(class_name), tuple(bases), class_attributes) return cls - def _normalize_fips_region(self, region_name, - client_config): + def _normalize_fips_region(self, region_name, client_config): if region_name is not None: - normalized_region_name = region_name.replace( - 'fips-', '').replace('-fips', '') + normalized_region_name = region_name.replace('fips-', '').replace( + '-fips', '' + ) # If region has been transformed then set flag if normalized_region_name != region_name: config_use_fips_endpoint = Config(use_fips_endpoint=True) if client_config: # Keeping endpoint setting client specific client_config = client_config.merge( - config_use_fips_endpoint) + config_use_fips_endpoint + ) else: client_config = config_use_fips_endpoint logger.warning( 'transforming region from %s to %s and setting ' 'use_fips_endpoint to true. client should not ' - 'be configured with a fips psuedo region.' % ( - region_name, normalized_region_name - ) + 'be configured with a fips psuedo region.' + % (region_name, normalized_region_name) ) region_name = normalized_region_name return region_name, client_config def _load_service_model(self, service_name, api_version=None): - json_model = self._loader.load_service_model(service_name, 'service-2', - api_version=api_version) + json_model = self._loader.load_service_model( + service_name, 'service-2', api_version=api_version + ) service_model = ServiceModel(json_model, service_name=service_name) return service_model @@ -198,19 +238,22 @@ class ClientCreator(object): retries = self._transform_legacy_retries(client.meta.config.retries) retry_config = self._retry_config_translator.build_retry_config( - endpoint_prefix, original_config.get('retry', {}), + endpoint_prefix, + original_config.get('retry', {}), original_config.get('definitions', {}), - retries + retries, ) - logger.debug("Registering retry handlers for service: %s", - client.meta.service_model.service_name) + logger.debug( + "Registering retry handlers for service: %s", + client.meta.service_model.service_name, + ) handler = self._retry_handler_factory.create_retry_handler( - retry_config, endpoint_prefix) + retry_config, endpoint_prefix + ) unique_id = 'retry-config-%s' % service_event_name client.meta.events.register( - 'needs-retry.%s' % service_event_name, handler, - unique_id=unique_id + f"needs-retry.{service_event_name}", handler, unique_id=unique_id ) def _transform_legacy_retries(self, retries): @@ -220,13 +263,16 @@ class ClientCreator(object): if 'total_max_attempts' in retries: copied_args = retries.copy() copied_args['max_attempts'] = ( - copied_args.pop('total_max_attempts') - 1) + copied_args.pop('total_max_attempts') - 1 + ) return copied_args def _get_retry_mode(self, client, config_store): client_retries = client.meta.config.retries - if client_retries is not None and \ - client_retries.get('mode') is not None: + if ( + client_retries is not None + and client_retries.get('mode') is not None + ): return client_retries['mode'] return config_store.get_config_variable('retry_mode') or 'legacy' @@ -244,17 +290,22 @@ class ClientCreator(object): enabled = config.endpoint_discovery_enabled elif self._config_store: enabled = self._config_store.get_config_variable( - 'endpoint_discovery_enabled') + 'endpoint_discovery_enabled' + ) enabled = self._normalize_endpoint_discovery_config(enabled) if enabled and self._requires_endpoint_discovery(client, enabled): discover = enabled is True - manager = EndpointDiscoveryManager(client, 
always_discover=discover) + manager = EndpointDiscoveryManager( + client, always_discover=discover + ) handler = EndpointDiscoveryHandler(manager) handler.register(events, service_id) else: - events.register('before-parameter-build', - block_endpoint_discovery_required_operations) + events.register( + 'before-parameter-build', + block_endpoint_discovery_required_operations, + ) def _normalize_endpoint_discovery_config(self, enabled): """Config must either be a boolean-string or string-literal 'auto'""" @@ -282,11 +333,17 @@ class ClientCreator(object): EventbridgeSignerSetter( endpoint_resolver=self._endpoint_resolver, region=client.meta.region_name, - endpoint_url=endpoint_url + endpoint_url=endpoint_url, ).register(client.meta.events) - def _register_s3_events(self, client, endpoint_bridge, endpoint_url, - client_config, scoped_config): + def _register_s3_events( + self, + client, + endpoint_bridge, + endpoint_url, + client_config, + scoped_config, + ): if client.meta.service_model.service_name != 's3': return S3RegionRedirector(endpoint_bridge, client).register() @@ -298,14 +355,19 @@ class ClientCreator(object): s3_config=client.meta.config.s3, endpoint_url=endpoint_url, partition=client.meta.partition, - use_fips_endpoint=use_fips_endpoint + use_fips_endpoint=use_fips_endpoint, ).register(client.meta.events) self._set_s3_presign_signature_version( - client.meta, client_config, scoped_config) + client.meta, client_config, scoped_config + ) def _register_s3_control_events( - self, client, endpoint_bridge, - endpoint_url, client_config, scoped_config + self, + client, + endpoint_bridge, + endpoint_url, + client_config, + scoped_config, ): if client.meta.service_model.service_name != 's3control': return @@ -317,16 +379,18 @@ class ClientCreator(object): s3_config=client.meta.config.s3, endpoint_url=endpoint_url, partition=client.meta.partition, - use_fips_endpoint=use_fips_endpoint + use_fips_endpoint=use_fips_endpoint, ).register(client.meta.events) - def _set_s3_presign_signature_version(self, client_meta, - client_config, scoped_config): + def _set_s3_presign_signature_version( + self, client_meta, client_config, scoped_config + ): # This will return the manually configured signature version, or None # if none was manually set. If a customer manually sets the signature # version, we always want to use what they set. provided_signature_version = _get_configured_signature_version( - 's3', client_config, scoped_config) + 's3', client_config, scoped_config + ) if provided_signature_version is not None: return @@ -337,15 +401,19 @@ class ClientCreator(object): # global endpoint, we should respect the signature versions it # supports, which includes v2. regions = self._endpoint_resolver.get_available_endpoints( - 's3', client_meta.partition) - if client_meta.region_name != 'aws-global' and \ - client_meta.region_name not in regions: + 's3', client_meta.partition + ) + if ( + client_meta.region_name != 'aws-global' + and client_meta.region_name not in regions + ): return # If it is a region we know about, we want to default to sigv2, so here # we check to see if it is available. endpoint = self._endpoint_resolver.construct_endpoint( - 's3', client_meta.region_name) + 's3', client_meta.region_name + ) signature_versions = endpoint['signatureVersions'] if 's3' not in signature_versions: return @@ -354,7 +422,8 @@ class ClientCreator(object): # the customer hasn't set a signature version so we default the # signature version to sigv2. 
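`_normalize_fips_region` earlier in this client.py hunk strips a `fips-` prefix or `-fips` suffix from a pseudo region and tells the caller to enable FIPS endpoints instead. The transformation itself reduces to this sketch:

```python
def normalize_fips_region(region_name):
    # Second element signals that use_fips_endpoint=True should be set
    # on the client config.
    normalized = region_name.replace("fips-", "").replace("-fips", "")
    return normalized, normalized != region_name

assert normalize_fips_region("fips-us-gov-west-1") == ("us-gov-west-1", True)
assert normalize_fips_region("eu-central-1") == ("eu-central-1", False)
```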
client_meta.events.register( - 'choose-signer.s3', self._default_s3_presign_to_sigv2) + 'choose-signer.s3', self._default_s3_presign_to_sigv2 + ) def _default_s3_presign_to_sigv2(self, signature_version, **kwargs): """ @@ -374,23 +443,45 @@ class ClientCreator(object): if signature_version.endswith(suffix): return 's3' + suffix - def _get_client_args(self, service_model, region_name, is_secure, - endpoint_url, verify, credentials, - scoped_config, client_config, endpoint_bridge): + def _get_client_args( + self, + service_model, + region_name, + is_secure, + endpoint_url, + verify, + credentials, + scoped_config, + client_config, + endpoint_bridge, + ): args_creator = ClientArgsCreator( - self._event_emitter, self._user_agent, - self._response_parser_factory, self._loader, - self._exceptions_factory, config_store=self._config_store) + self._event_emitter, + self._user_agent, + self._response_parser_factory, + self._loader, + self._exceptions_factory, + config_store=self._config_store, + ) return args_creator.get_client_args( - service_model, region_name, is_secure, endpoint_url, - verify, credentials, scoped_config, client_config, endpoint_bridge) + service_model, + region_name, + is_secure, + endpoint_url, + verify, + credentials, + scoped_config, + client_config, + endpoint_bridge, + ) def _create_methods(self, service_model): op_dict = {} for operation_name in service_model.operation_names: py_operation_name = xform_name(operation_name) op_dict[py_operation_name] = self._create_api_method( - py_operation_name, operation_name, service_model) + py_operation_name, operation_name, service_model + ) return op_dict def _create_name_mapping(self, service_model): @@ -402,15 +493,17 @@ class ClientCreator(object): mapping[py_operation_name] = operation_name return mapping - def _create_api_method(self, py_operation_name, operation_name, - service_model): + def _create_api_method( + self, py_operation_name, operation_name, service_model + ): def _api_call(self, *args, **kwargs): # We're accepting *args so that we can give a more helpful # error message than TypeError: _api_call takes exactly # 1 argument. if args: raise TypeError( - "%s() only accepts keyword arguments." % py_operation_name) + f"{py_operation_name}() only accepts keyword arguments." + ) # The "self" in this scope is referring to the BaseClient. 
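`_create_api_method` here generates keyword-only client methods at class-creation time; the closure body completes just below. A stripped-down sketch of the pattern, assuming the instance provides a `_make_api_call` hook:

```python
def make_api_method(py_operation_name, operation_name):
    def _api_call(self, *args, **kwargs):
        # Positional arguments are rejected so call sites stay explicit.
        if args:
            raise TypeError(
                f"{py_operation_name}() only accepts keyword arguments."
            )
        return self._make_api_call(operation_name, kwargs)

    _api_call.__name__ = py_operation_name
    # The client creator attaches these to a dynamically built class
    # via type(), as shown in _create_client_class above.
    return _api_call
```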
return self._make_api_call(operation_name, kwargs) @@ -424,13 +517,13 @@ class ClientCreator(object): event_emitter=self._event_emitter, method_description=operation_model.documentation, example_prefix='response = client.%s' % py_operation_name, - include_signature=False + include_signature=False, ) _api_call.__doc__ = docstring return _api_call -class ClientEndpointBridge(object): +class ClientEndpointBridge: """Bridges endpoint data and client creation This class handles taking out the relevant arguments from the endpoint @@ -444,9 +537,15 @@ class ClientEndpointBridge(object): DEFAULT_ENDPOINT = '{service}.{region}.amazonaws.com' _DUALSTACK_CUSTOMIZED_SERVICES = ['s3', 's3-control'] - def __init__(self, endpoint_resolver, scoped_config=None, - client_config=None, default_endpoint=None, - service_signing_name=None, config_store=None): + def __init__( + self, + endpoint_resolver, + scoped_config=None, + client_config=None, + default_endpoint=None, + service_signing_name=None, + config_store=None, + ): self.service_signing_name = service_signing_name self.endpoint_resolver = endpoint_resolver self.scoped_config = scoped_config @@ -454,16 +553,19 @@ class ClientEndpointBridge(object): self.default_endpoint = default_endpoint or self.DEFAULT_ENDPOINT self.config_store = config_store - def resolve(self, service_name, region_name=None, endpoint_url=None, - is_secure=True): + def resolve( + self, service_name, region_name=None, endpoint_url=None, is_secure=True + ): region_name = self._check_default_region(service_name, region_name) use_dualstack_endpoint = self._resolve_use_dualstack_endpoint( - service_name) + service_name + ) use_fips_endpoint = self._resolve_endpoint_variant_config_var( 'use_fips_endpoint' ) resolved = self.endpoint_resolver.construct_endpoint( - service_name, region_name, + service_name, + region_name, use_dualstack_endpoint=use_dualstack_endpoint, use_fips_endpoint=use_fips_endpoint, ) @@ -474,17 +576,21 @@ class ClientEndpointBridge(object): # TODO: fallback partition_name should be configurable in the # future for users to define as needed. resolved = self.endpoint_resolver.construct_endpoint( - service_name, region_name, partition_name='aws', + service_name, + region_name, + partition_name='aws', use_dualstack_endpoint=use_dualstack_endpoint, use_fips_endpoint=use_fips_endpoint, ) if resolved: return self._create_endpoint( - resolved, service_name, region_name, endpoint_url, is_secure) + resolved, service_name, region_name, endpoint_url, is_secure + ) else: - return self._assume_endpoint(service_name, region_name, - endpoint_url, is_secure) + return self._assume_endpoint( + service_name, region_name, endpoint_url, is_secure + ) def _check_default_region(self, service_name, region_name): if region_name is not None: @@ -493,23 +599,31 @@ class ClientEndpointBridge(object): if self.client_config and self.client_config.region_name is not None: return self.client_config.region_name - def _create_endpoint(self, resolved, service_name, region_name, - endpoint_url, is_secure): + def _create_endpoint( + self, resolved, service_name, region_name, endpoint_url, is_secure + ): region_name, signing_region = self._pick_region_values( - resolved, region_name, endpoint_url) + resolved, region_name, endpoint_url + ) if endpoint_url is None: # Use the sslCommonName over the hostname for Python 2.6 compat. 
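`resolve` above retries endpoint construction against the default `aws` partition before assuming an endpoint. The control flow, sketched against a stub resolver (a hypothetical stand-in for botocore's endpoint resolver):

```python
class StubResolver:
    # Hypothetical stand-in: knows nothing until asked for partition 'aws'.
    def construct_endpoint(self, service, region, partition_name=None, **kw):
        if partition_name == "aws":
            return {"hostname": f"{service}.{region}.amazonaws.com"}
        return None

def resolve_with_fallback(resolver, service, region):
    resolved = resolver.construct_endpoint(service, region)
    if not resolved:
        # The fallback partition is hard-coded to 'aws' today; the TODO
        # in the hunk above notes it should become configurable.
        resolved = resolver.construct_endpoint(
            service, region, partition_name="aws"
        )
    return resolved

print(resolve_with_fallback(StubResolver(), "sqs", "xx-test-1"))
```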
hostname = resolved.get('sslCommonName', resolved.get('hostname')) - endpoint_url = self._make_url(hostname, is_secure, - resolved.get('protocols', [])) + endpoint_url = self._make_url( + hostname, is_secure, resolved.get('protocols', []) + ) signature_version = self._resolve_signature_version( - service_name, resolved) + service_name, resolved + ) signing_name = self._resolve_signing_name(service_name, resolved) return self._create_result( - service_name=service_name, region_name=region_name, - signing_region=signing_region, signing_name=signing_name, - endpoint_url=endpoint_url, metadata=resolved, - signature_version=signature_version) + service_name=service_name, + region_name=region_name, + signing_region=signing_region, + signing_name=signing_name, + endpoint_url=endpoint_url, + metadata=resolved, + signature_version=signature_version, + ) def _resolve_endpoint_variant_config_var(self, config_var): client_config = self.client_config @@ -528,7 +642,8 @@ class ClientEndpointBridge(object): if s3_dualstack_mode is not None: return s3_dualstack_mode return self._resolve_endpoint_variant_config_var( - 'use_dualstack_endpoint') + 'use_dualstack_endpoint' + ) def _is_s3_dualstack_mode(self, service_name): if service_name not in self._DUALSTACK_CUSTOMIZED_SERVICES: @@ -538,39 +653,59 @@ class ClientEndpointBridge(object): # ClientArgsCreator. _resolve_signature_version also has similarly # duplicated logic. client_config = self.client_config - if client_config is not None and client_config.s3 is not None and \ - 'use_dualstack_endpoint' in client_config.s3: + if ( + client_config is not None + and client_config.s3 is not None + and 'use_dualstack_endpoint' in client_config.s3 + ): # Client config trumps scoped config. return client_config.s3['use_dualstack_endpoint'] if self.scoped_config is not None: enabled = self.scoped_config.get('s3', {}).get( - 'use_dualstack_endpoint') + 'use_dualstack_endpoint' + ) if enabled in [True, 'True', 'true']: return True - def _assume_endpoint(self, service_name, region_name, endpoint_url, - is_secure): + def _assume_endpoint( + self, service_name, region_name, endpoint_url, is_secure + ): if endpoint_url is None: # Expand the default hostname URI template. hostname = self.default_endpoint.format( - service=service_name, region=region_name) - endpoint_url = self._make_url(hostname, is_secure, - ['http', 'https']) - logger.debug('Assuming an endpoint for %s, %s: %s', - service_name, region_name, endpoint_url) + service=service_name, region=region_name + ) + endpoint_url = self._make_url( + hostname, is_secure, ['http', 'https'] + ) + logger.debug( + f'Assuming an endpoint for {service_name}, {region_name}: {endpoint_url}' + ) # We still want to allow the user to provide an explicit version. 
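`_assume_endpoint` just above expands the default hostname template when the resolver has no entry for a region; the expansion is simply (illustrative service and region):

```python
DEFAULT_ENDPOINT = "{service}.{region}.amazonaws.com"

def assume_endpoint_url(service, region, is_secure=True):
    hostname = DEFAULT_ENDPOINT.format(service=service, region=region)
    scheme = "https" if is_secure else "http"
    return f"{scheme}://{hostname}"

assert assume_endpoint_url("sqs", "eu-west-1") == "https://sqs.eu-west-1.amazonaws.com"
```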
signature_version = self._resolve_signature_version( - service_name, {'signatureVersions': ['v4']}) + service_name, {'signatureVersions': ['v4']} + ) signing_name = self._resolve_signing_name(service_name, resolved={}) return self._create_result( - service_name=service_name, region_name=region_name, - signing_region=region_name, signing_name=signing_name, - signature_version=signature_version, endpoint_url=endpoint_url, - metadata={}) - - def _create_result(self, service_name, region_name, signing_region, - signing_name, endpoint_url, signature_version, - metadata): + service_name=service_name, + region_name=region_name, + signing_region=region_name, + signing_name=signing_name, + signature_version=signature_version, + endpoint_url=endpoint_url, + metadata={}, + ) + + def _create_result( + self, + service_name, + region_name, + signing_region, + signing_name, + endpoint_url, + signature_version, + metadata, + ): return { 'service_name': service_name, 'region_name': region_name, @@ -578,7 +713,7 @@ class ClientEndpointBridge(object): 'signing_name': signing_name, 'endpoint_url': endpoint_url, 'signature_version': signature_version, - 'metadata': metadata + 'metadata': metadata, } def _make_url(self, hostname, is_secure, supported_protocols): @@ -586,12 +721,14 @@ class ClientEndpointBridge(object): scheme = 'https' else: scheme = 'http' - return '%s://%s' % (scheme, hostname) + return f'{scheme}://{hostname}' def _resolve_signing_name(self, service_name, resolved): # CredentialScope overrides everything else. - if 'credentialScope' in resolved \ - and 'service' in resolved['credentialScope']: + if ( + 'credentialScope' in resolved + and 'service' in resolved['credentialScope'] + ): return resolved['credentialScope']['service'] # Use the signingName from the model if present. if self.service_signing_name: @@ -614,14 +751,17 @@ class ClientEndpointBridge(object): # custom endpoints. region_name = resolved['endpointName'] signing_region = region_name - if 'credentialScope' in resolved \ - and 'region' in resolved['credentialScope']: + if ( + 'credentialScope' in resolved + and 'region' in resolved['credentialScope'] + ): signing_region = resolved['credentialScope']['region'] return region_name, signing_region def _resolve_signature_version(self, service_name, resolved): configured_version = _get_configured_signature_version( - service_name, self.client_config, self.scoped_config) + service_name, self.client_config, self.scoped_config + ) if configured_version is not None: return configured_version @@ -638,10 +778,11 @@ class ClientEndpointBridge(object): if known in AUTH_TYPE_MAPS: return known raise UnknownSignatureVersionError( - signature_version=resolved.get('signatureVersions')) + signature_version=resolved.get('signatureVersions') + ) -class BaseClient(object): +class BaseClient: # This is actually reassigned with the py->op_name mapping # when the client creator creates the subclass. This value is used @@ -651,9 +792,19 @@ class BaseClient(object): # we need the reverse mapping here. 
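# For orientation, the py->op_name mapping described here is exposed
# publicly as client.meta.method_to_api_mapping; a quick check:
import botocore.session

client = botocore.session.Session().create_client(
    's3', region_name='us-east-1'
)
print(client.meta.method_to_api_mapping['list_objects_v2'])  # -> ListObjectsV2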
_PY_TO_OP_NAME = {} - def __init__(self, serializer, endpoint, response_parser, - event_emitter, request_signer, service_model, loader, - client_config, partition, exceptions_factory): + def __init__( + self, + serializer, + endpoint, + response_parser, + event_emitter, + request_signer, + service_model, + loader, + client_config, + partition, + exceptions_factory, + ): self._serializer = serializer self._endpoint = endpoint self._response_parser = response_parser @@ -661,34 +812,38 @@ class BaseClient(object): self._cache = {} self._loader = loader self._client_config = client_config - self.meta = ClientMeta(event_emitter, self._client_config, - endpoint.host, service_model, - self._PY_TO_OP_NAME, partition) + self.meta = ClientMeta( + event_emitter, + self._client_config, + endpoint.host, + service_model, + self._PY_TO_OP_NAME, + partition, + ) self._exceptions_factory = exceptions_factory self._exceptions = None self._register_handlers() def __getattr__(self, item): - event_name = 'getattr.%s.%s' % ( - self._service_model.service_id.hyphenize(), item - ) + service_id = self._service_model.service_id.hyphenize() + event_name = f'getattr.{service_id}.{item}' + handler, event_response = self.meta.events.emit_until_response( - event_name, client=self) + event_name, client=self + ) if event_response is not None: return event_response raise AttributeError( - "'%s' object has no attribute '%s'" % ( - self.__class__.__name__, item) + f"'{self.__class__.__name__}' object has no attribute '{item}'" ) def _register_handlers(self): # Register the handler required to sign requests. service_id = self.meta.service_model.service_id.hyphenize() self.meta.events.register( - 'request-created.%s' % service_id, - self._request_signer.handler + f"request-created.{service_id}", self._request_signer.handler ) @property @@ -698,14 +853,18 @@ class BaseClient(object): def _make_api_call(self, operation_name, api_params): operation_model = self._service_model.operation_model(operation_name) service_name = self._service_model.service_name - history_recorder.record('API_CALL', { - 'service': service_name, - 'operation': operation_name, - 'params': api_params, - }) + history_recorder.record( + 'API_CALL', + { + 'service': service_name, + 'operation': operation_name, + 'params': api_params, + }, + ) if operation_model.deprecated: - logger.debug('Warning: %s.%s() is deprecated', - service_name, operation_name) + logger.debug( + 'Warning: %s.%s() is deprecated', service_name, operation_name + ) request_context = { 'client_region': self.meta.region_name, 'client_config': self.meta.config, @@ -713,30 +872,37 @@ class BaseClient(object): 'auth_type': operation_model.auth_type, } request_dict = self._convert_to_request_dict( - api_params, operation_model, context=request_context) + api_params, operation_model, context=request_context + ) resolve_checksum_context(request_dict, operation_model, api_params) service_id = self._service_model.service_id.hyphenize() handler, event_response = self.meta.events.emit_until_response( 'before-call.{service_id}.{operation_name}'.format( - service_id=service_id, - operation_name=operation_name), - model=operation_model, params=request_dict, - request_signer=self._request_signer, context=request_context) + service_id=service_id, operation_name=operation_name + ), + model=operation_model, + params=request_dict, + request_signer=self._request_signer, + context=request_context, + ) if event_response is not None: http, parsed_response = event_response else: 
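# The emissions above are the supported interception points; a sketch of a
# read-only 'before-call' hook (extra emit kwargs are absorbed by **kwargs):
import botocore.session

client = botocore.session.Session().create_client(
    's3', region_name='us-east-1'
)

def audit(model, params, **kwargs):
    # Fires just before the signed request is sent; returning a non-None
    # value here would short-circuit _make_request() entirely.
    print('sending', model.name, 'to', params['url'])

client.meta.events.register('before-call.s3.ListObjectsV2', audit)
# A subsequent client.list_objects_v2(...) call now hits audit() first.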
apply_request_checksum(request_dict) http, parsed_response = self._make_request( - operation_model, request_dict, request_context) + operation_model, request_dict, request_context + ) self.meta.events.emit( 'after-call.{service_id}.{operation_name}'.format( - service_id=service_id, - operation_name=operation_name), - http_response=http, parsed=parsed_response, - model=operation_model, context=request_context + service_id=service_id, operation_name=operation_name + ), + http_response=http, + parsed=parsed_response, + model=operation_model, + context=request_context, ) if http.status_code >= 300: @@ -753,22 +919,30 @@ class BaseClient(object): self.meta.events.emit( 'after-call-error.{service_id}.{operation_name}'.format( service_id=self._service_model.service_id.hyphenize(), - operation_name=operation_model.name), - exception=e, context=request_context + operation_name=operation_model.name, + ), + exception=e, + context=request_context, ) raise - def _convert_to_request_dict(self, api_params, operation_model, - context=None): + def _convert_to_request_dict( + self, api_params, operation_model, context=None + ): api_params = self._emit_api_params( - api_params, operation_model, context) + api_params, operation_model, context + ) request_dict = self._serializer.serialize_to_request( - api_params, operation_model) + api_params, operation_model + ) if not self._client_config.inject_host_prefix: request_dict.pop('host_prefix', None) - prepare_request_dict(request_dict, endpoint_url=self._endpoint.host, - user_agent=self._client_config.user_agent, - context=context) + prepare_request_dict( + request_dict, + endpoint_url=self._endpoint.host, + user_agent=self._client_config.user_agent, + context=context, + ) return request_dict def _emit_api_params(self, api_params, operation_model, context): @@ -781,19 +955,19 @@ class BaseClient(object): # parameters or return a new set of parameters to use. service_id = self._service_model.service_id.hyphenize() responses = self.meta.events.emit( - 'provide-client-params.{service_id}.{operation_name}'.format( - service_id=service_id, - operation_name=operation_name), - params=api_params, model=operation_model, context=context) + f'provide-client-params.{service_id}.{operation_name}', + params=api_params, + model=operation_model, + context=context, + ) api_params = first_non_none_response(responses, default=api_params) - event_name = ( - 'before-parameter-build.{service_id}.{operation_name}') self.meta.events.emit( - event_name.format( - service_id=service_id, - operation_name=operation_name), - params=api_params, model=operation_model, context=context) + f'before-parameter-build.{service_id}.{operation_name}', + params=api_params, + model=operation_model, + context=context, + ) return api_params def get_paginator(self, operation_name): @@ -827,30 +1001,38 @@ class BaseClient(object): return Paginator.paginate(self, **kwargs) paginator_config = self._cache['page_config'][ - actual_operation_name] + actual_operation_name + ] # Add the docstring for the paginate method. paginate.__doc__ = PaginatorDocstring( paginator_name=actual_operation_name, event_emitter=self.meta.events, service_model=self.meta.service_model, paginator_config=paginator_config, - include_signature=False + include_signature=False, ) # Rename the paginator class based on the type of paginator. 
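# The class assembled here behaves like any documented paginator; typical
# use, with a placeholder bucket name:
import botocore.session

client = botocore.session.Session().create_client(
    's3', region_name='us-east-1'
)
if client.can_paginate('list_objects_v2'):
    paginator = client.get_paginator('list_objects_v2')
    # paginate() consumes the continuation tokens declared in the
    # service's paginators-1 model.
    for page in paginator.paginate(Bucket='example-bucket'):
        for obj in page.get('Contents', []):
            print(obj['Key'])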
- paginator_class_name = str('%s.Paginator.%s' % ( - get_service_module_name(self.meta.service_model), - actual_operation_name)) + service_module_name = get_service_module_name( + self.meta.service_model + ) + paginator_class_name = ( + f"{service_module_name}.Paginator.{actual_operation_name}" + ) # Create the new paginator class documented_paginator_cls = type( - paginator_class_name, (Paginator,), {'paginate': paginate}) + paginator_class_name, (Paginator,), {'paginate': paginate} + ) - operation_model = self._service_model.operation_model(actual_operation_name) + operation_model = self._service_model.operation_model( + actual_operation_name + ) paginator = documented_paginator_cls( getattr(self, operation_name), paginator_config, - operation_model) + operation_model, + ) return paginator def can_paginate(self, operation_name): @@ -873,7 +1055,8 @@ class BaseClient(object): page_config = self._loader.load_service_model( self._service_model.service_name, 'paginators-1', - self._service_model.api_version)['pagination'] + self._service_model.api_version, + )['pagination'] self._cache['page_config'] = page_config except DataNotFoundError: self._cache['page_config'] = {} @@ -886,7 +1069,8 @@ class BaseClient(object): waiter_config = self._loader.load_service_model( self._service_model.service_name, 'waiters-2', - self._service_model.api_version) + self._service_model.api_version, + ) self._cache['waiter_config'] = waiter_config except DataNotFoundError: self._cache['waiter_config'] = {} @@ -913,7 +1097,8 @@ class BaseClient(object): raise ValueError("Waiter does not exist: %s" % waiter_name) return waiter.create_waiter_with_client( - mapping[waiter_name], model, self) + mapping[waiter_name], model, self + ) @CachedProperty def waiter_names(self): @@ -934,10 +1119,11 @@ class BaseClient(object): def _load_exceptions(self): return self._exceptions_factory.create_client_exceptions( - self._service_model) + self._service_model + ) -class ClientMeta(object): +class ClientMeta: """Holds additional client methods. This class holds additional information for clients. It exists for @@ -950,8 +1136,15 @@ class ClientMeta(object): """ - def __init__(self, events, client_config, endpoint_url, service_model, - method_to_api_mapping, partition): + def __init__( + self, + events, + client_config, + endpoint_url, + service_model, + method_to_api_mapping, + partition, + ): self.events = events self._client_config = client_config self._endpoint_url = endpoint_url @@ -984,8 +1177,9 @@ class ClientMeta(object): return self._partition -def _get_configured_signature_version(service_name, client_config, - scoped_config): +def _get_configured_signature_version( + service_name, client_config, scoped_config +): """ Gets the manually configured signature version. 
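The waiter plumbing in the hunks above has an equally small public surface; a typical use, with placeholder names (the wait() call needs real credentials and a real bucket):

import botocore.session

client = botocore.session.Session().create_client(
    's3', region_name='us-east-1'
)
print(client.waiter_names)  # snake_cased names from the waiters-2 model
waiter = client.get_waiter('bucket_exists')
# Polls the modeled operation until an acceptor matches or retries run out.
waiter.wait(Bucket='example-bucket')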
@@ -1007,6 +1201,8 @@ def _get_configured_signature_version(service_name, client_config, logger.debug( "Switching signature version for service %s " "to version %s based on config file override.", - service_name, version) + service_name, + version, + ) return version return None diff --git a/contrib/python/botocore/py3/botocore/compat.py b/contrib/python/botocore/py3/botocore/compat.py index 23ef7ec8d9..94907c7215 100644 --- a/contrib/python/botocore/py3/botocore/compat.py +++ b/contrib/python/botocore/py3/botocore/compat.py @@ -102,7 +102,8 @@ def filter_ssl_warnings(): 'ignore', message="A true SSLContext object is not available.*", category=exceptions.InsecurePlatformWarning, - module=r".*urllib3\.util\.ssl_") + module=r".*urllib3\.util\.ssl_", + ) @classmethod @@ -120,6 +121,7 @@ def from_pairs(cls, pairs): new_instance[key] = value return new_instance + HTTPHeaders.from_dict = from_dict HTTPHeaders.from_pairs = from_pairs @@ -281,6 +283,7 @@ def get_tzinfo_options(): # this happens, which will get time info from the Windows registry. if sys.platform == 'win32': from dateutil.tz import tzwinlocal + return (tzlocal, tzwinlocal) else: return (tzlocal,) @@ -289,6 +292,7 @@ def get_tzinfo_options(): # Detect if CRT is available for use try: import awscrt.auth + # Allow user opt-out if needed disabled = os.environ.get('BOTO_DISABLE_CRT', "false") HAS_CRT = not disabled.lower() == 'true' @@ -327,7 +331,9 @@ _variations = [ "(?:(?:%(hex)s:){0,6}%(hex)s)?::", ] -UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~" +UNRESERVED_PAT = ( + r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~" +) IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")" ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+" IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]" diff --git a/contrib/python/botocore/py3/botocore/config.py b/contrib/python/botocore/py3/botocore/config.py index dea544c19f..0187fca765 100644 --- a/contrib/python/botocore/py3/botocore/config.py +++ b/contrib/python/botocore/py3/botocore/config.py @@ -22,7 +22,7 @@ from botocore.exceptions import ( ) -class Config(object): +class Config: """Advanced configuration for Botocore clients. :type region_name: str @@ -184,26 +184,29 @@ class Config(object): Defaults to None. 
""" - OPTION_DEFAULTS = OrderedDict([ - ('region_name', None), - ('signature_version', None), - ('user_agent', None), - ('user_agent_extra', None), - ('connect_timeout', DEFAULT_TIMEOUT), - ('read_timeout', DEFAULT_TIMEOUT), - ('parameter_validation', True), - ('max_pool_connections', MAX_POOL_CONNECTIONS), - ('proxies', None), - ('proxies_config', None), - ('s3', None), - ('retries', None), - ('client_cert', None), - ('inject_host_prefix', True), - ('endpoint_discovery_enabled', None), - ('use_dualstack_endpoint', None), - ('use_fips_endpoint', None), - ('defaults_mode', None) - ]) + + OPTION_DEFAULTS = OrderedDict( + [ + ('region_name', None), + ('signature_version', None), + ('user_agent', None), + ('user_agent_extra', None), + ('connect_timeout', DEFAULT_TIMEOUT), + ('read_timeout', DEFAULT_TIMEOUT), + ('parameter_validation', True), + ('max_pool_connections', MAX_POOL_CONNECTIONS), + ('proxies', None), + ('proxies_config', None), + ('s3', None), + ('retries', None), + ('client_cert', None), + ('inject_host_prefix', True), + ('endpoint_discovery_enabled', None), + ('use_dualstack_endpoint', None), + ('use_fips_endpoint', None), + ('defaults_mode', None), + ] + ) NON_LEGACY_OPTION_DEFAULTS = { 'connect_timeout': None, @@ -211,12 +214,14 @@ class Config(object): def __init__(self, *args, **kwargs): self._user_provided_options = self._record_user_provided_options( - args, kwargs) + args, kwargs + ) # Merge the user_provided options onto the default options config_vars = copy.copy(self.OPTION_DEFAULTS) defaults_mode = self._user_provided_options.get( - 'defaults_mode', 'legacy') + 'defaults_mode', 'legacy' + ) if defaults_mode != 'legacy': config_vars.update(self.NON_LEGACY_OPTION_DEFAULTS) config_vars.update(self._user_provided_options) @@ -241,15 +246,14 @@ class Config(object): user_provided_options[key] = value # The key must exist in the available options else: - raise TypeError( - 'Got unexpected keyword argument \'%s\'' % key) + raise TypeError(f"Got unexpected keyword argument '{key}'") # The number of args should not be longer than the allowed # options if len(args) > len(option_order): raise TypeError( - 'Takes at most %s arguments (%s given)' % ( - len(option_order), len(args))) + f"Takes at most {len(option_order)} arguments ({len(args)} given)" + ) # Iterate through the args passed through to the constructor and map # them to appropriate keys. 
@@ -257,8 +261,8 @@ class Config(object): # If it a kwarg was specified for the arg, then error out if option_order[i] in user_provided_options: raise TypeError( - 'Got multiple values for keyword argument \'%s\'' % ( - option_order[i])) + f"Got multiple values for keyword argument '{option_order[i]}'" + ) user_provided_options[option_order[i]] = arg return user_provided_options @@ -268,14 +272,16 @@ class Config(object): addressing_style = s3.get('addressing_style') if addressing_style not in ['virtual', 'auto', 'path', None]: raise InvalidS3AddressingStyleError( - s3_addressing_style=addressing_style) + s3_addressing_style=addressing_style + ) def _validate_retry_configuration(self, retries): if retries is not None: for key, value in retries.items(): if key not in ['max_attempts', 'mode', 'total_max_attempts']: raise InvalidRetryConfigurationError( - retry_config_option=key) + retry_config_option=key + ) if key == 'max_attempts' and value < 0: raise InvalidMaxRetryAttemptsError( provided_max_attempts=value, @@ -286,11 +292,12 @@ class Config(object): provided_max_attempts=value, min_value=1, ) - if key == 'mode' and value not in ['legacy', 'standard', - 'adaptive']: - raise InvalidRetryModeError( - provided_retry_mode=value - ) + if key == 'mode' and value not in ( + 'legacy', + 'standard', + 'adaptive', + ): + raise InvalidRetryModeError(provided_retry_mode=value) def merge(self, other_config): """Merges the config object with another config object diff --git a/contrib/python/botocore/py3/botocore/configloader.py b/contrib/python/botocore/py3/botocore/configloader.py index 7adfcb5b6e..870ca4c183 100644 --- a/contrib/python/botocore/py3/botocore/configloader.py +++ b/contrib/python/botocore/py3/botocore/configloader.py @@ -148,7 +148,8 @@ def raw_config_parse(config_filename, parse_subsections=True): cp.read([path]) except (six.moves.configparser.Error, UnicodeDecodeError) as e: raise botocore.exceptions.ConfigParseError( - path=_unicode_path(path), error=e) from None + path=_unicode_path(path), error=e + ) from None else: for section in cp.sections(): config[section] = {} @@ -162,13 +163,14 @@ def raw_config_parse(config_filename, parse_subsections=True): config_value = _parse_nested(config_value) except ValueError as e: raise botocore.exceptions.ConfigParseError( - path=_unicode_path(path), error=e) from None + path=_unicode_path(path), error=e + ) from None config[section][option] = config_value return config def _unicode_path(path): - if isinstance(path, six.text_type): + if isinstance(path, str): return path # According to the documentation getfilesystemencoding can return None # on unix in which case the default encoding is used instead. diff --git a/contrib/python/botocore/py3/botocore/configprovider.py b/contrib/python/botocore/py3/botocore/configprovider.py index e822ff2ec0..240e771c0c 100644 --- a/contrib/python/botocore/py3/botocore/configprovider.py +++ b/contrib/python/botocore/py3/botocore/configprovider.py @@ -54,59 +54,85 @@ BOTOCORE_DEFAUT_SESSION_VARIABLES = { 'config_file': (None, 'AWS_CONFIG_FILE', '~/.aws/config', None), 'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None), 'api_versions': ('api_versions', None, {}, None), - # This is the shared credentials file amongst sdks. - 'credentials_file': (None, 'AWS_SHARED_CREDENTIALS_FILE', - '~/.aws/credentials', None), - + 'credentials_file': ( + None, + 'AWS_SHARED_CREDENTIALS_FILE', + '~/.aws/credentials', + None, + ), # These variables only exist in the config file. 
- # This is the number of seconds until we time out a request to # the instance metadata service. 'metadata_service_timeout': ( 'metadata_service_timeout', - 'AWS_METADATA_SERVICE_TIMEOUT', 1, int), + 'AWS_METADATA_SERVICE_TIMEOUT', + 1, + int, + ), # This is the number of request attempts we make until we give # up trying to retrieve data from the instance metadata service. 'metadata_service_num_attempts': ( 'metadata_service_num_attempts', - 'AWS_METADATA_SERVICE_NUM_ATTEMPTS', 1, int), + 'AWS_METADATA_SERVICE_NUM_ATTEMPTS', + 1, + int, + ), 'ec2_metadata_service_endpoint': ( 'ec2_metadata_service_endpoint', 'AWS_EC2_METADATA_SERVICE_ENDPOINT', - None, None), + None, + None, + ), 'ec2_metadata_service_endpoint_mode': ( 'ec2_metadata_service_endpoint_mode', 'AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE', - None, None), + None, + None, + ), 'imds_use_ipv6': ( 'imds_use_ipv6', 'AWS_IMDS_USE_IPV6', - False, utils.ensure_boolean), + False, + utils.ensure_boolean, + ), 'use_dualstack_endpoint': ( 'use_dualstack_endpoint', 'AWS_USE_DUALSTACK_ENDPOINT', - None, utils.ensure_boolean), + None, + utils.ensure_boolean, + ), 'use_fips_endpoint': ( 'use_fips_endpoint', 'AWS_USE_FIPS_ENDPOINT', - None, utils.ensure_boolean), + None, + utils.ensure_boolean, + ), 'parameter_validation': ('parameter_validation', None, True, None), # Client side monitoring configurations. # Note: These configurations are considered internal to botocore. # Do not use them until publicly documented. 'csm_enabled': ( - 'csm_enabled', 'AWS_CSM_ENABLED', False, utils.ensure_boolean), + 'csm_enabled', + 'AWS_CSM_ENABLED', + False, + utils.ensure_boolean, + ), 'csm_host': ('csm_host', 'AWS_CSM_HOST', '127.0.0.1', None), 'csm_port': ('csm_port', 'AWS_CSM_PORT', 31000, int), 'csm_client_id': ('csm_client_id', 'AWS_CSM_CLIENT_ID', '', None), # Endpoint discovery configuration 'endpoint_discovery_enabled': ( - 'endpoint_discovery_enabled', 'AWS_ENDPOINT_DISCOVERY_ENABLED', - 'auto', None), + 'endpoint_discovery_enabled', + 'AWS_ENDPOINT_DISCOVERY_ENABLED', + 'auto', + None, + ), 'sts_regional_endpoints': ( - 'sts_regional_endpoints', 'AWS_STS_REGIONAL_ENDPOINTS', 'legacy', - None + 'sts_regional_endpoints', + 'AWS_STS_REGIONAL_ENDPOINTS', + 'legacy', + None, ), 'retry_mode': ('retry_mode', 'AWS_RETRY_MODE', 'legacy', None), 'defaults_mode': ('defaults_mode', 'AWS_DEFAULTS_MODE', 'legacy', None), @@ -118,30 +144,45 @@ BOTOCORE_DEFAUT_SESSION_VARIABLES = { # vars that typically go in the s3 section of the config file. This mapping # follows the same schema as the previous session variable mapping. 
DEFAULT_S3_CONFIG_VARS = { - 'addressing_style': ( - ('s3', 'addressing_style'), None, None, None), + 'addressing_style': (('s3', 'addressing_style'), None, None, None), 'use_accelerate_endpoint': ( - ('s3', 'use_accelerate_endpoint'), None, None, utils.ensure_boolean + ('s3', 'use_accelerate_endpoint'), + None, + None, + utils.ensure_boolean, ), 'use_dualstack_endpoint': ( - ('s3', 'use_dualstack_endpoint'), None, None, utils.ensure_boolean + ('s3', 'use_dualstack_endpoint'), + None, + None, + utils.ensure_boolean, ), 'payload_signing_enabled': ( - ('s3', 'payload_signing_enabled'), None, None, utils.ensure_boolean + ('s3', 'payload_signing_enabled'), + None, + None, + utils.ensure_boolean, ), 'use_arn_region': ( - ['s3_use_arn_region', - ('s3', 'use_arn_region')], - 'AWS_S3_USE_ARN_REGION', None, utils.ensure_boolean + ['s3_use_arn_region', ('s3', 'use_arn_region')], + 'AWS_S3_USE_ARN_REGION', + None, + utils.ensure_boolean, ), 'us_east_1_regional_endpoint': ( - ['s3_us_east_1_regional_endpoint', - ('s3', 'us_east_1_regional_endpoint')], - 'AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', None, None + [ + 's3_us_east_1_regional_endpoint', + ('s3', 'us_east_1_regional_endpoint'), + ], + 'AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', + None, + None, ), 's3_disable_multiregion_access_points': ( ('s3', 's3_disable_multiregion_access_points'), - 'AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS', None, utils.ensure_boolean + 'AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS', + None, + utils.ensure_boolean, ), } # A mapping for the proxy specific configuration vars. These are @@ -151,21 +192,30 @@ DEFAULT_PROXIES_CONFIG_VARS = { 'proxy_ca_bundle': ('proxy_ca_bundle', None, None, None), 'proxy_client_cert': ('proxy_client_cert', None, None, None), 'proxy_use_forwarding_for_https': ( - 'proxy_use_forwarding_for_https', None, None, utils.normalize_boolean), + 'proxy_use_forwarding_for_https', + None, + None, + utils.normalize_boolean, + ), } def create_botocore_default_config_mapping(session): chain_builder = ConfigChainFactory(session=session) config_mapping = _create_config_chain_mapping( - chain_builder, BOTOCORE_DEFAUT_SESSION_VARIABLES) + chain_builder, BOTOCORE_DEFAUT_SESSION_VARIABLES + ) config_mapping['s3'] = SectionConfigProvider( - 's3', session, _create_config_chain_mapping( - chain_builder, DEFAULT_S3_CONFIG_VARS) + 's3', + session, + _create_config_chain_mapping(chain_builder, DEFAULT_S3_CONFIG_VARS), ) config_mapping['proxies_config'] = SectionConfigProvider( - 'proxies_config', session, _create_config_chain_mapping( - chain_builder, DEFAULT_PROXIES_CONFIG_VARS) + 'proxies_config', + session, + _create_config_chain_mapping( + chain_builder, DEFAULT_PROXIES_CONFIG_VARS + ), ) return config_mapping @@ -178,13 +228,12 @@ def _create_config_chain_mapping(chain_builder, config_variables): env_var_names=config[1], config_property_names=config[0], default=config[2], - conversion_func=config[3] + conversion_func=config[3], ) return mapping class DefaultConfigResolver: - def __init__(self, default_config_data): self._base_default_config = default_config_data['base'] self._modes = default_config_data['modes'] @@ -220,13 +269,14 @@ class DefaultConfigResolver: return self._resolved_default_configurations[mode] -class ConfigChainFactory(object): +class ConfigChainFactory: """Factory class to create our most common configuration chain case. This is a convenience class to construct configuration chains that follow our most common pattern. 
This is to prevent ordering them incorrectly, and to make the config chain construction more readable. """ + def __init__(self, session, environ=None): """Initialize a ConfigChainFactory. @@ -243,9 +293,14 @@ class ConfigChainFactory(object): environ = os.environ self._environ = environ - def create_config_chain(self, instance_name=None, env_var_names=None, - config_property_names=None, default=None, - conversion_func=None): + def create_config_chain( + self, + instance_name=None, + env_var_names=None, + config_property_names=None, + default=None, + conversion_func=None, + ): """Build a config chain following the standard botocore pattern. In botocore most of our config chains follow the the precendence: @@ -287,8 +342,7 @@ class ConfigChainFactory(object): if instance_name is not None: providers.append( InstanceVarProvider( - instance_var=instance_name, - session=self._session + instance_var=instance_name, session=self._session ) ) if env_var_names is not None: @@ -329,8 +383,9 @@ class ConfigChainFactory(object): return scoped_config_providers -class ConfigValueStore(object): +class ConfigValueStore: """The ConfigValueStore object stores configuration values.""" + def __init__(self, mapping=None): """Initialize a ConfigValueStore. @@ -347,9 +402,7 @@ class ConfigValueStore(object): self.set_config_provider(logical_name, provider) def __deepcopy__(self, memo): - return ConfigValueStore( - copy.deepcopy(self._mapping, memo) - ) + return ConfigValueStore(copy.deepcopy(self._mapping, memo)) def get_config_variable(self, logical_name): """ @@ -385,7 +438,10 @@ class ConfigValueStore(object): :returns: configuration provider or None if not defined. """ - if logical_name in self._overrides or logical_name not in self._mapping: + if ( + logical_name in self._overrides + or logical_name not in self._mapping + ): return None provider = self._mapping[logical_name] return provider @@ -444,7 +500,6 @@ class ConfigValueStore(object): class SmartDefaultsConfigStoreFactory: - def __init__(self, default_config_resolver, imds_region_provider): self._default_config_resolver = default_config_resolver self._imds_region_provider = imds_region_provider @@ -455,8 +510,9 @@ class SmartDefaultsConfigStoreFactory: def merge_smart_defaults(self, config_store, mode, region_name): if mode == 'auto': mode = self.resolve_auto_mode(region_name) - default_configs = self._default_config_resolver.get_default_config_values( - mode) + default_configs = ( + self._default_config_resolver.get_default_config_values(mode) + ) for config_var in default_configs: config_value = default_configs[config_var] method = getattr(self, f'_set_{config_var}', None) @@ -473,8 +529,7 @@ class SmartDefaultsConfigStoreFactory: current_region = self._instance_metadata_region else: try: - current_region = \ - self._imds_region_provider.provide() + current_region = self._imds_region_provider.provide() self._instance_metadata_region = current_region except Exception: pass @@ -493,13 +548,18 @@ class SmartDefaultsConfigStoreFactory: provider.set_default_provider(default_provider) return elif isinstance(provider, BaseProvider): - default_provider = ChainProvider(providers=[provider, default_provider]) + default_provider = ChainProvider( + providers=[provider, default_provider] + ) config_store.set_config_provider(variable, default_provider) - def _update_section_provider(self, config_store, section_name, variable, - value): + def _update_section_provider( + self, config_store, section_name, variable, value + ): section_provider = 
config_store.get_config_provider(section_name) - section_provider.set_default_provider(variable, ConstantProvider(value)) + section_provider.set_default_provider( + variable, ConstantProvider(value) + ) def _set_retryMode(self, config_store, value): self._update_provider(config_store, 'retry_mode', value) @@ -509,18 +569,20 @@ class SmartDefaultsConfigStoreFactory: def _set_s3UsEast1RegionalEndpoints(self, config_store, value): self._update_section_provider( - config_store, 's3', 'us_east_1_regional_endpoint', value) + config_store, 's3', 'us_east_1_regional_endpoint', value + ) def _set_connectTimeoutInMillis(self, config_store, value): - self._update_provider(config_store, 'connect_timeout', value/1000) + self._update_provider(config_store, 'connect_timeout', value / 1000) -class BaseProvider(object): +class BaseProvider: """Base class for configuration value providers. A configuration provider has some method of providing a configuration value. """ + def provide(self): """Provide a config value.""" raise NotImplementedError('provide') @@ -532,6 +594,7 @@ class ChainProvider(BaseProvider): Each provider in the chain is called, the first one returning a non-None value is then returned. """ + def __init__(self, providers=None, conversion_func=None): """Initalize a ChainProvider. @@ -551,8 +614,7 @@ class ChainProvider(BaseProvider): def __deepcopy__(self, memo): return ChainProvider( - copy.deepcopy(self._providers, memo), - self._conversion_func + copy.deepcopy(self._providers, memo), self._conversion_func ) def provide(self): @@ -569,14 +631,17 @@ class ChainProvider(BaseProvider): return None def set_default_provider(self, default_provider): - if self._providers and isinstance(self._providers[-1], ConstantProvider): + if self._providers and isinstance( + self._providers[-1], ConstantProvider + ): self._providers[-1] = default_provider else: self._providers.append(default_provider) - num_of_constants = sum(isinstance( - provider, ConstantProvider - ) for provider in self._providers) + num_of_constants = sum( + isinstance(provider, ConstantProvider) + for provider in self._providers + ) if num_of_constants > 1: logger.info( 'ChainProvider object contains multiple ' @@ -594,6 +659,7 @@ class ChainProvider(BaseProvider): class InstanceVarProvider(BaseProvider): """This class loads config values from the session instance vars.""" + def __init__(self, instance_var, session): """Initialize InstanceVarProvider. 
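These providers compose without any session machinery; a self-contained sketch of the first-non-None semantics (the environment variable name is invented):

import os

from botocore.configprovider import (
    ChainProvider,
    ConstantProvider,
    EnvironmentProvider,
)

chain = ChainProvider(
    providers=[
        EnvironmentProvider('APP_TIMEOUT', os.environ),  # hypothetical var
        ConstantProvider('60'),                          # fallback value
    ],
    conversion_func=int,
)
# provide() walks the list and converts the first non-None hit.
print(chain.provide())  # -> 60 unless APP_TIMEOUT overrides it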
@@ -609,8 +675,7 @@ class InstanceVarProvider(BaseProvider): def __deepcopy__(self, memo): return InstanceVarProvider( - copy.deepcopy(self._instance_var, memo), - self._session + copy.deepcopy(self._instance_var, memo), self._session ) def provide(self): @@ -620,7 +685,7 @@ class InstanceVarProvider(BaseProvider): return value def __repr__(self): - return 'InstanceVarProvider(instance_var=%s, session=%s)' % ( + return 'InstanceVarProvider(instance_var={}, session={})'.format( self._instance_var, self._session, ) @@ -645,8 +710,7 @@ class ScopedConfigProvider(BaseProvider): def __deepcopy__(self, memo): return ScopedConfigProvider( - copy.deepcopy(self._config_var_name, memo), - self._session + copy.deepcopy(self._config_var_name, memo), self._session ) def provide(self): @@ -660,7 +724,7 @@ class ScopedConfigProvider(BaseProvider): return scoped_config.get(self._config_var_name) def __repr__(self): - return 'ScopedConfigProvider(config_var_name=%s, session=%s)' % ( + return 'ScopedConfigProvider(config_var_name={}, session={})'.format( self._config_var_name, self._session, ) @@ -668,6 +732,7 @@ class ScopedConfigProvider(BaseProvider): class EnvironmentProvider(BaseProvider): """This class loads config values from environment variables.""" + def __init__(self, name, env): """Initialize with the keys in the dictionary to check. @@ -682,8 +747,7 @@ class EnvironmentProvider(BaseProvider): def __deepcopy__(self, memo): return EnvironmentProvider( - copy.deepcopy(self._name, memo), - copy.deepcopy(self._env, memo) + copy.deepcopy(self._name, memo), copy.deepcopy(self._env, memo) ) def provide(self): @@ -693,7 +757,7 @@ class EnvironmentProvider(BaseProvider): return None def __repr__(self): - return 'EnvironmentProvider(name=%s, env=%s)' % (self._name, self._env) + return f'EnvironmentProvider(name={self._name}, env={self._env})' class SectionConfigProvider(BaseProvider): @@ -702,11 +766,13 @@ class SectionConfigProvider(BaseProvider): This is useful for retrieving scoped config variables (i.e. s3) that have their own set of config variables and resolving logic. 
""" + def __init__(self, section_name, session, override_providers=None): self._section_name = section_name self._session = session self._scoped_config_provider = ScopedConfigProvider( - self._section_name, self._session) + self._section_name, self._session + ) self._override_providers = override_providers if self._override_providers is None: self._override_providers = {} @@ -715,15 +781,18 @@ class SectionConfigProvider(BaseProvider): return SectionConfigProvider( copy.deepcopy(self._section_name, memo), self._session, - copy.deepcopy(self._override_providers, memo) + copy.deepcopy(self._override_providers, memo), ) def provide(self): section_config = self._scoped_config_provider.provide() if section_config and not isinstance(section_config, dict): - logger.debug("The %s config key is not a dictionary type, " - "ignoring its value of: %s", self._section_name, - section_config) + logger.debug( + "The %s config key is not a dictionary type, " + "ignoring its value of: %s", + self._section_name, + section_config, + ) return None for section_config_var, provider in self._override_providers.items(): provider_val = provider.provide() @@ -739,21 +808,22 @@ class SectionConfigProvider(BaseProvider): provider.set_default_provider(default_provider) return elif isinstance(provider, BaseProvider): - default_provider = ChainProvider(providers=[provider, default_provider]) + default_provider = ChainProvider( + providers=[provider, default_provider] + ) self._override_providers[key] = default_provider def __repr__(self): return ( - 'SectionConfigProvider(section_name=%s, ' - 'session=%s, override_providers=%s)' % ( - self._section_name, self._session, - self._override_providers, - ) + f'SectionConfigProvider(section_name={self._section_name}, ' + f'session={self._session}, ' + f'override_providers={self._override_providers})' ) class ConstantProvider(BaseProvider): """This provider provides a constant value.""" + def __init__(self, value): self._value = value diff --git a/contrib/python/botocore/py3/botocore/credentials.py b/contrib/python/botocore/py3/botocore/credentials.py index 9227fe16bf..79a58103e4 100644 --- a/contrib/python/botocore/py3/botocore/credentials.py +++ b/contrib/python/botocore/py3/botocore/credentials.py @@ -53,8 +53,9 @@ from botocore.utils import ( ) logger = logging.getLogger(__name__) -ReadOnlyCredentials = namedtuple('ReadOnlyCredentials', - ['access_key', 'secret_key', 'token']) +ReadOnlyCredentials = namedtuple( + 'ReadOnlyCredentials', ['access_key', 'secret_key', 'token'] +) _DEFAULT_MANDATORY_REFRESH_TIMEOUT = 10 * 60 # 10 min _DEFAULT_ADVISORY_REFRESH_TIMEOUT = 15 * 60 # 15 min @@ -75,9 +76,11 @@ def create_credential_resolver(session, cache=None, region_name=None): imds_config = { 'ec2_metadata_service_endpoint': session.get_config_variable( - 'ec2_metadata_service_endpoint'), + 'ec2_metadata_service_endpoint' + ), 'ec2_metadata_service_endpoint_mode': resolve_imds_endpoint_mode( - session), + session + ), 'ec2_credential_refresh_window': _DEFAULT_ADVISORY_REFRESH_TIMEOUT, } @@ -91,19 +94,21 @@ def create_credential_resolver(session, cache=None, region_name=None): timeout=metadata_timeout, num_attempts=num_attempts, user_agent=session.user_agent(), - config=imds_config) + config=imds_config, + ) ) profile_provider_builder = ProfileProviderBuilder( - session, cache=cache, region_name=region_name) + session, cache=cache, region_name=region_name + ) assume_role_provider = AssumeRoleProvider( load_config=lambda: session.full_config, 
client_creator=_get_client_creator(session, region_name), cache=cache, profile_name=profile_name, - credential_sourcer=CanonicalNameCredentialSourcer([ - env_provider, container_provider, instance_metadata_provider - ]), + credential_sourcer=CanonicalNameCredentialSourcer( + [env_provider, container_provider, instance_metadata_provider] + ), profile_provider_builder=profile_provider_builder, ) @@ -140,14 +145,16 @@ def create_credential_resolver(session, cache=None, region_name=None): # EnvProvider does not return credentials, which is what we want # in this scenario. providers.remove(env_provider) - logger.debug('Skipping environment variable credential check' - ' because profile name was explicitly set.') + logger.debug( + 'Skipping environment variable credential check' + ' because profile name was explicitly set.' + ) resolver = CredentialResolver(providers=providers) return resolver -class ProfileProviderBuilder(object): +class ProfileProviderBuilder: """This class handles the creation of profile based providers. NOTE: This class is only intended for internal use. @@ -157,8 +164,10 @@ class ProfileProviderBuilder(object): This is needed to enable sharing between the default credential chain and the source profile chain created by the assume role provider. """ - def __init__(self, session, cache=None, region_name=None, - sso_token_cache=None): + + def __init__( + self, session, cache=None, region_name=None, sso_token_cache=None + ): self._session = session self._cache = cache self._region_name = region_name @@ -167,7 +176,8 @@ class ProfileProviderBuilder(object): def providers(self, profile_name, disable_env_vars=False): return [ self._create_web_identity_provider( - profile_name, disable_env_vars, + profile_name, + disable_env_vars, ), self._create_sso_provider(profile_name), self._create_shared_credential_provider(profile_name), @@ -199,7 +209,8 @@ class ProfileProviderBuilder(object): return AssumeRoleWithWebIdentityProvider( load_config=lambda: self._session.full_config, client_creator=_get_client_creator( - self._session, self._region_name), + self._session, self._region_name + ), cache=self._cache, profile_name=profile_name, disable_env_vars=disable_env_vars, @@ -240,9 +251,7 @@ def _serialize_if_needed(value, iso=False): def _get_client_creator(session, region_name): def client_creator(service_name, **kwargs): - create_client_kwargs = { - 'region_name': region_name - } + create_client_kwargs = {'region_name': region_name} create_client_kwargs.update(**kwargs) return session.create_client(service_name, **create_client_kwargs) @@ -261,12 +270,12 @@ def create_assume_role_refresher(client, params): 'token': credentials['SessionToken'], 'expiry_time': _serialize_if_needed(credentials['Expiration']), } + return refresh def create_mfa_serial_refresher(actual_refresh): - - class _Refresher(object): + class _Refresher: def __init__(self, refresh): self._refresh = refresh self._has_been_called = False @@ -283,7 +292,7 @@ def create_mfa_serial_refresher(actual_refresh): return _Refresher(actual_refresh) -class JSONFileCache(object): +class JSONFileCache: """JSON file cache. This provides a dict like interface that stores JSON serializable objects. 
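The dict-like contract described in that docstring looks like this in practice (the key and payload are arbitrary; files land in the class's default CACHE_DIR under the user's home directory):

from botocore.credentials import JSONFileCache

cache = JSONFileCache()
cache['example-entry'] = {'n': 1}  # serialized to a mode-0o600 JSON file
print(cache['example-entry'])      # -> {'n': 1}
del cache['example-entry']
# Reading a missing key raises KeyError, mirroring a plain dict.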
@@ -312,7 +321,7 @@ class JSONFileCache(object): try: with open(actual_key) as f: return json.load(f) - except (OSError, ValueError, IOError): + except (OSError, ValueError): raise KeyError(cache_key) def __delitem__(self, cache_key): @@ -328,12 +337,15 @@ class JSONFileCache(object): try: file_content = self._dumps(value) except (TypeError, ValueError): - raise ValueError("Value cannot be cached, must be " - "JSON serializable: %s" % value) + raise ValueError( + f"Value cannot be cached, must be " + f"JSON serializable: {value}" + ) if not os.path.isdir(self._working_dir): os.makedirs(self._working_dir) - with os.fdopen(os.open(full_key, - os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f: + with os.fdopen( + os.open(full_key, os.O_WRONLY | os.O_CREAT, 0o600), 'w' + ) as f: f.truncate() f.write(file_content) @@ -342,7 +354,7 @@ class JSONFileCache(object): return full_path -class Credentials(object): +class Credentials: """ Holds the credentials needed to authenticate requests. @@ -353,8 +365,7 @@ class Credentials(object): were found. """ - def __init__(self, access_key, secret_key, token=None, - method=None): + def __init__(self, access_key, secret_key, token=None, method=None): self.access_key = access_key self.secret_key = secret_key self.token = token @@ -376,9 +387,9 @@ class Credentials(object): self.secret_key = botocore.compat.ensure_unicode(self.secret_key) def get_frozen_credentials(self): - return ReadOnlyCredentials(self.access_key, - self.secret_key, - self.token) + return ReadOnlyCredentials( + self.access_key, self.secret_key, self.token + ) class RefreshableCredentials(Credentials): @@ -394,6 +405,7 @@ class RefreshableCredentials(Credentials): were found. :param function time_fetcher: Callback function to retrieve current time. """ + # The time at which we'll attempt to refresh, but not # block if someone else is refreshing. _advisory_refresh_timeout = _DEFAULT_ADVISORY_REFRESH_TIMEOUT @@ -401,9 +413,16 @@ class RefreshableCredentials(Credentials): # refreshed credentials. 
_mandatory_refresh_timeout = _DEFAULT_MANDATORY_REFRESH_TIMEOUT - def __init__(self, access_key, secret_key, token, - expiry_time, refresh_using, method, - time_fetcher=_local_now): + def __init__( + self, + access_key, + secret_key, + token, + expiry_time, + refresh_using, + method, + time_fetcher=_local_now, + ): self._refresh_using = refresh_using self._access_key = access_key self._secret_key = secret_key @@ -413,7 +432,8 @@ class RefreshableCredentials(Credentials): self._refresh_lock = threading.Lock() self.method = method self._frozen_credentials = ReadOnlyCredentials( - access_key, secret_key, token) + access_key, secret_key, token + ) self._normalize() def _normalize(self): @@ -428,7 +448,7 @@ class RefreshableCredentials(Credentials): token=metadata['token'], expiry_time=cls._expiry_datetime(metadata['expiry_time']), method=method, - refresh_using=refresh_using + refresh_using=refresh_using, ) return instance @@ -529,7 +549,8 @@ class RefreshableCredentials(Credentials): if not self.refresh_needed(self._advisory_refresh_timeout): return is_mandatory_refresh = self.refresh_needed( - self._mandatory_refresh_timeout) + self._mandatory_refresh_timeout + ) self._protected_refresh(is_mandatory=is_mandatory_refresh) return finally: @@ -549,9 +570,12 @@ class RefreshableCredentials(Credentials): metadata = self._refresh_using() except Exception: period_name = 'mandatory' if is_mandatory else 'advisory' - logger.warning("Refreshing temporary credentials failed " - "during %s refresh period.", - period_name, exc_info=True) + logger.warning( + "Refreshing temporary credentials failed " + "during %s refresh period.", + period_name, + exc_info=True, + ) if is_mandatory: # If this is a mandatory refresh, then # all errors that occur when we attempt to refresh @@ -563,15 +587,18 @@ class RefreshableCredentials(Credentials): return self._set_from_data(metadata) self._frozen_credentials = ReadOnlyCredentials( - self._access_key, self._secret_key, self._token) + self._access_key, self._secret_key, self._token + ) if self._is_expired(): # We successfully refreshed credentials but for whatever # reason, our refreshing function returned credentials # that are still expired. In this scenario, the only # thing we can do is let the user know and raise # an exception. - msg = ("Credentials were refreshed, but the " - "refreshed credentials are still expired.") + msg = ( + "Credentials were refreshed, but the " + "refreshed credentials are still expired." + ) logger.warning(msg) raise RuntimeError(msg) @@ -597,8 +624,9 @@ class RefreshableCredentials(Credentials): self.secret_key = data['secret_key'] self.token = data['token'] self._expiry_time = parse(data['expiry_time']) - logger.debug("Retrieved credentials will expire at: %s", - self._expiry_time) + logger.debug( + "Retrieved credentials will expire at: %s", self._expiry_time + ) self._normalize() def get_frozen_credentials(self): @@ -644,6 +672,7 @@ class DeferredRefreshableCredentials(RefreshableCredentials): refresh_using will be called upon first access. 
""" + def __init__(self, refresh_using, method, time_fetcher=_local_now): self._refresh_using = refresh_using self._access_key = None @@ -658,12 +687,10 @@ class DeferredRefreshableCredentials(RefreshableCredentials): def refresh_needed(self, refresh_in=None): if self._frozen_credentials is None: return True - return super(DeferredRefreshableCredentials, self).refresh_needed( - refresh_in - ) + return super().refresh_needed(refresh_in) -class CachedCredentialFetcher(object): +class CachedCredentialFetcher: DEFAULT_EXPIRY_WINDOW_SECONDS = 60 * 15 def __init__(self, cache=None, expiry_window_seconds=None): @@ -733,8 +760,14 @@ class CachedCredentialFetcher(object): class BaseAssumeRoleCredentialFetcher(CachedCredentialFetcher): - def __init__(self, client_creator, role_arn, extra_args=None, - cache=None, expiry_window_seconds=None): + def __init__( + self, + client_creator, + role_arn, + extra_args=None, + cache=None, + expiry_window_seconds=None, + ): self._client_creator = client_creator self._role_arn = role_arn @@ -749,9 +782,7 @@ class BaseAssumeRoleCredentialFetcher(CachedCredentialFetcher): if not self._role_session_name: self._generate_assume_role_name() - super(BaseAssumeRoleCredentialFetcher, self).__init__( - cache, expiry_window_seconds - ) + super().__init__(cache, expiry_window_seconds) def _generate_assume_role_name(self): self._role_session_name = 'botocore-session-%s' % (int(time.time())) @@ -782,9 +813,16 @@ class BaseAssumeRoleCredentialFetcher(CachedCredentialFetcher): class AssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher): - def __init__(self, client_creator, source_credentials, role_arn, - extra_args=None, mfa_prompter=None, cache=None, - expiry_window_seconds=None): + def __init__( + self, + client_creator, + source_credentials, + role_arn, + extra_args=None, + mfa_prompter=None, + cache=None, + expiry_window_seconds=None, + ): """ :type client_creator: callable :param client_creator: A callable that creates a client taking @@ -821,9 +859,12 @@ class AssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher): if self._mfa_prompter is None: self._mfa_prompter = getpass.getpass - super(AssumeRoleCredentialFetcher, self).__init__( - client_creator, role_arn, extra_args=extra_args, - cache=cache, expiry_window_seconds=expiry_window_seconds + super().__init__( + client_creator, + role_arn, + extra_args=extra_args, + cache=cache, + expiry_window_seconds=expiry_window_seconds, ) def _get_credentials(self): @@ -862,10 +903,17 @@ class AssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher): class AssumeRoleWithWebIdentityCredentialFetcher( - BaseAssumeRoleCredentialFetcher + BaseAssumeRoleCredentialFetcher ): - def __init__(self, client_creator, web_identity_token_loader, role_arn, - extra_args=None, cache=None, expiry_window_seconds=None): + def __init__( + self, + client_creator, + web_identity_token_loader, + role_arn, + extra_args=None, + cache=None, + expiry_window_seconds=None, + ): """ :type client_creator: callable :param client_creator: A callable that creates a client taking @@ -895,9 +943,12 @@ class AssumeRoleWithWebIdentityCredentialFetcher( """ self._web_identity_token_loader = web_identity_token_loader - super(AssumeRoleWithWebIdentityCredentialFetcher, self).__init__( - client_creator, role_arn, extra_args=extra_args, - cache=cache, expiry_window_seconds=expiry_window_seconds + super().__init__( + client_creator, + role_arn, + extra_args=extra_args, + cache=cache, + expiry_window_seconds=expiry_window_seconds, ) def _get_credentials(self): @@ 
-918,7 +969,7 @@ class AssumeRoleWithWebIdentityCredentialFetcher( return assume_role_kwargs -class CredentialProvider(object): +class CredentialProvider: # A short name to identify the provider within botocore. METHOD = None @@ -958,8 +1009,9 @@ class CredentialProvider(object): try: found.append(mapping[key_name]) except KeyError: - raise PartialCredentialsError(provider=self.METHOD, - cred_var=key_name) + raise PartialCredentialsError( + provider=self.METHOD, cred_var=key_name + ) return found @@ -983,34 +1035,38 @@ class ProcessProvider(CredentialProvider): return RefreshableCredentials.create_from_metadata( creds_dict, lambda: self._retrieve_credentials_using(credential_process), - self.METHOD + self.METHOD, ) return Credentials( access_key=creds_dict['access_key'], secret_key=creds_dict['secret_key'], token=creds_dict.get('token'), - method=self.METHOD + method=self.METHOD, ) def _retrieve_credentials_using(self, credential_process): # We're not using shell=True, so we need to pass the # command and all arguments as a list. process_list = compat_shell_split(credential_process) - p = self._popen(process_list, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + p = self._popen( + process_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) stdout, stderr = p.communicate() if p.returncode != 0: raise CredentialRetrievalError( - provider=self.METHOD, error_msg=stderr.decode('utf-8')) + provider=self.METHOD, error_msg=stderr.decode('utf-8') + ) parsed = botocore.compat.json.loads(stdout.decode('utf-8')) version = parsed.get('Version', '<Version key not provided>') if version != 1: raise CredentialRetrievalError( provider=self.METHOD, - error_msg=("Unsupported version '%s' for credential process " - "provider, supported versions: 1" % version)) + error_msg=( + f"Unsupported version '{version}' for credential process " + f"provider, supported versions: 1" + ), + ) try: return { 'access_key': parsed['AccessKeyId'], @@ -1021,15 +1077,16 @@ class ProcessProvider(CredentialProvider): except KeyError as e: raise CredentialRetrievalError( provider=self.METHOD, - error_msg="Missing required key in response: %s" % e + error_msg=f"Missing required key in response: {e}", ) @property def _credential_process(self): if self._loaded_config is None: self._loaded_config = self._load_config() - profile_config = self._loaded_config.get( - 'profiles', {}).get(self._profile_name, {}) + profile_config = self._loaded_config.get('profiles', {}).get( + self._profile_name, {} + ) return profile_config.get('credential_process') @@ -1048,8 +1105,9 @@ class InstanceMetadataProvider(CredentialProvider): metadata = fetcher.retrieve_iam_role_credentials() if not metadata: return None - logger.debug('Found credentials from IAM Role: %s', - metadata['role_name']) + logger.debug( + 'Found credentials from IAM Role: %s', metadata['role_name'] + ) # We manually set the data here, since we already made the request & # have it. When the expiry is hit, the credentials will auto-refresh # themselves. 
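From the process side, the contract ProcessProvider enforces above is small: print Version-1 JSON on stdout and exit 0. A minimal stand-in for whatever script credential_process points at:

#!/usr/bin/env python3
import json

print(json.dumps({
    'Version': 1,                           # any other value is rejected
    'AccessKeyId': 'AKIDEXAMPLE',           # dummy values throughout
    'SecretAccessKey': 'SECRETEXAMPLE',
    'SessionToken': 'TOKENEXAMPLE',         # optional
    'Expiration': '2030-01-01T00:00:00Z',   # optional; enables auto-refresh
}))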
@@ -1098,15 +1156,17 @@ class EnvProvider(CredentialProvider): var_mapping['expiry_time'] = self.EXPIRY_TIME else: var_mapping['access_key'] = mapping.get( - 'access_key', self.ACCESS_KEY) + 'access_key', self.ACCESS_KEY + ) var_mapping['secret_key'] = mapping.get( - 'secret_key', self.SECRET_KEY) - var_mapping['token'] = mapping.get( - 'token', self.TOKENS) + 'secret_key', self.SECRET_KEY + ) + var_mapping['token'] = mapping.get('token', self.TOKENS) if not isinstance(var_mapping['token'], list): var_mapping['token'] = [var_mapping['token']] var_mapping['expiry_time'] = mapping.get( - 'expiry_time', self.EXPIRY_TIME) + 'expiry_time', self.EXPIRY_TIME + ) return var_mapping def load(self): @@ -1125,14 +1185,19 @@ class EnvProvider(CredentialProvider): if expiry_time is not None: expiry_time = parse(expiry_time) return RefreshableCredentials( - credentials['access_key'], credentials['secret_key'], - credentials['token'], expiry_time, - refresh_using=fetcher, method=self.METHOD + credentials['access_key'], + credentials['secret_key'], + credentials['token'], + expiry_time, + refresh_using=fetcher, + method=self.METHOD, ) return Credentials( - credentials['access_key'], credentials['secret_key'], - credentials['token'], method=self.METHOD + credentials['access_key'], + credentials['secret_key'], + credentials['token'], + method=self.METHOD, ) else: return None @@ -1148,13 +1213,15 @@ class EnvProvider(CredentialProvider): access_key = environ.get(mapping['access_key'], '') if not access_key: raise PartialCredentialsError( - provider=method, cred_var=mapping['access_key']) + provider=method, cred_var=mapping['access_key'] + ) credentials['access_key'] = access_key secret_key = environ.get(mapping['secret_key'], '') if not secret_key: raise PartialCredentialsError( - provider=method, cred_var=mapping['secret_key']) + provider=method, cred_var=mapping['secret_key'] + ) credentials['secret_key'] = secret_key credentials['token'] = None @@ -1170,7 +1237,8 @@ class EnvProvider(CredentialProvider): credentials['expiry_time'] = expiry_time if require_expiry and not expiry_time: raise PartialCredentialsError( - provider=method, cred_var=mapping['expiry_time']) + provider=method, cred_var=mapping['expiry_time'] + ) return credentials @@ -1199,7 +1267,8 @@ class OriginalEC2Provider(CredentialProvider): """ if 'AWS_CREDENTIAL_FILE' in self._environ: full_path = os.path.expanduser( - self._environ['AWS_CREDENTIAL_FILE']) + self._environ['AWS_CREDENTIAL_FILE'] + ) creds = self._parser(full_path) if self.ACCESS_KEY in creds: logger.info('Found credentials in AWS_CREDENTIAL_FILE.') @@ -1239,13 +1308,17 @@ class SharedCredentialProvider(CredentialProvider): if self._profile_name in available_creds: config = available_creds[self._profile_name] if self.ACCESS_KEY in config: - logger.info("Found credentials in shared credentials file: %s", - self._creds_filename) + logger.info( + "Found credentials in shared credentials file: %s", + self._creds_filename, + ) access_key, secret_key = self._extract_creds_from_mapping( - config, self.ACCESS_KEY, self.SECRET_KEY) + config, self.ACCESS_KEY, self.SECRET_KEY + ) token = self._get_session_token(config) - return Credentials(access_key, secret_key, token, - method=self.METHOD) + return Credentials( + access_key, secret_key, token, method=self.METHOD + ) def _get_session_token(self, config): for token_envvar in self.TOKENS: @@ -1255,6 +1328,7 @@ class SharedCredentialProvider(CredentialProvider): class ConfigProvider(CredentialProvider): """INI based config provider with 
profile sections.""" + METHOD = 'config-file' CANONICAL_NAME = 'SharedConfig' @@ -1292,13 +1366,17 @@ class ConfigProvider(CredentialProvider): if self._profile_name in full_config['profiles']: profile_config = full_config['profiles'][self._profile_name] if self.ACCESS_KEY in profile_config: - logger.info("Credentials found in config file: %s", - self._config_filename) + logger.info( + "Credentials found in config file: %s", + self._config_filename, + ) access_key, secret_key = self._extract_creds_from_mapping( - profile_config, self.ACCESS_KEY, self.SECRET_KEY) + profile_config, self.ACCESS_KEY, self.SECRET_KEY + ) token = self._get_session_token(profile_config) - return Credentials(access_key, secret_key, token, - method=self.METHOD) + return Credentials( + access_key, secret_key, token, method=self.METHOD + ) else: return None @@ -1342,12 +1420,15 @@ class BotoProvider(CredentialProvider): if 'Credentials' in config: credentials = config['Credentials'] if self.ACCESS_KEY in credentials: - logger.info("Found credentials in boto config file: %s", - filename) + logger.info( + "Found credentials in boto config file: %s", filename + ) access_key, secret_key = self._extract_creds_from_mapping( - credentials, self.ACCESS_KEY, self.SECRET_KEY) - return Credentials(access_key, secret_key, - method=self.METHOD) + credentials, self.ACCESS_KEY, self.SECRET_KEY + ) + return Credentials( + access_key, secret_key, method=self.METHOD + ) class AssumeRoleProvider(CredentialProvider): @@ -1365,9 +1446,16 @@ class AssumeRoleProvider(CredentialProvider): # EXPIRY_WINDOW. EXPIRY_WINDOW_SECONDS = 60 * 15 - def __init__(self, load_config, client_creator, cache, profile_name, - prompter=getpass.getpass, credential_sourcer=None, - profile_provider_builder=None): + def __init__( + self, + load_config, + client_creator, + cache, + profile_name, + prompter=getpass.getpass, + credential_sourcer=None, + profile_provider_builder=None, + ): """ :type load_config: callable :param load_config: A function that accepts no arguments, and @@ -1429,7 +1517,8 @@ class AssumeRoleProvider(CredentialProvider): def _has_assume_role_config_vars(self, profile): return ( - self.ROLE_CONFIG_VAR in profile and + self.ROLE_CONFIG_VAR in profile + and # We need to ensure this provider doesn't look at a profile when # the profile has configuration for web identity. 
Simply relying on # the order in the credential chain is insufficient as it doesn't @@ -1478,7 +1567,7 @@ class AssumeRoleProvider(CredentialProvider): return DeferredRefreshableCredentials( method=self.METHOD, refresh_using=refresher, - time_fetcher=_local_now + time_fetcher=_local_now, ) def _get_role_config(self, profile_name): @@ -1500,7 +1589,7 @@ class AssumeRoleProvider(CredentialProvider): 'mfa_serial': mfa_serial, 'role_session_name': role_session_name, 'source_profile': source_profile, - 'credential_source': credential_source + 'credential_source': credential_source, } if duration_seconds is not None: @@ -1521,11 +1610,10 @@ class AssumeRoleProvider(CredentialProvider): elif credential_source is None and source_profile is None: raise PartialCredentialsError( provider=self.METHOD, - cred_var='source_profile or credential_source' + cred_var='source_profile or credential_source', ) elif credential_source is not None: - self._validate_credential_source( - profile_name, credential_source) + self._validate_credential_source(profile_name, credential_source) else: self._validate_source_profile(profile_name, source_profile) @@ -1533,32 +1621,38 @@ class AssumeRoleProvider(CredentialProvider): def _validate_credential_source(self, parent_profile, credential_source): if self._credential_sourcer is None: - raise InvalidConfigError(error_msg=( - 'The credential_source "%s" is specified in profile "%s", ' - 'but no source provider was configured.' % ( - credential_source, parent_profile) - )) + raise InvalidConfigError( + error_msg=( + f"The credential_source \"{credential_source}\" is specified " + f"in profile \"{parent_profile}\", " + f"but no source provider was configured." + ) + ) if not self._credential_sourcer.is_supported(credential_source): - raise InvalidConfigError(error_msg=( - 'The credential source "%s" referenced in profile "%s" is not ' - 'valid.' % (credential_source, parent_profile) - )) + raise InvalidConfigError( + error_msg=( + f"The credential source \"{credential_source}\" referenced " + f"in profile \"{parent_profile}\" is not valid." + ) + ) def _source_profile_has_credentials(self, profile): - return any([ - self._has_static_credentials(profile), - self._has_assume_role_config_vars(profile), - ]) + return any( + [ + self._has_static_credentials(profile), + self._has_assume_role_config_vars(profile), + ] + ) - def _validate_source_profile(self, parent_profile_name, - source_profile_name): + def _validate_source_profile( + self, parent_profile_name, source_profile_name + ): profiles = self._loaded_config.get('profiles', {}) if source_profile_name not in profiles: raise InvalidConfigError( error_msg=( - 'The source_profile "%s" referenced in ' - 'the profile "%s" does not exist.' % ( - source_profile_name, parent_profile_name) + f"The source_profile \"{source_profile_name}\" referenced in " + f"the profile \"{parent_profile_name}\" does not exist." 
) ) @@ -1574,7 +1668,7 @@ class AssumeRoleProvider(CredentialProvider): if source_profile_name != parent_profile_name: raise InfiniteLoopConfigError( source_profile=source_profile_name, - visited_profiles=self._visited_profiles + visited_profiles=self._visited_profiles, ) # A profile is allowed to reference itself so that it can source @@ -1585,7 +1679,7 @@ if not self._has_static_credentials(source_profile): raise InfiniteLoopConfigError( source_profile=source_profile_name, - visited_profiles=self._visited_profiles + visited_profiles=self._visited_profiles, ) def _has_static_credentials(self, profile): @@ -1607,15 +1701,18 @@ profiles = self._loaded_config.get('profiles', {}) profile = profiles[profile_name] - if self._has_static_credentials(profile) and \ - not self._profile_provider_builder: + if ( + self._has_static_credentials(profile) + and not self._profile_provider_builder + ): # This is only here for backwards compatibility. If this provider # isn't given a profile provider builder we still want to be able # to handle the basic static credential case as we would before the # profile provider builder parameter was added. return self._resolve_static_credentials_from_profile(profile) - elif self._has_static_credentials(profile) or \ - not self._has_assume_role_config_vars(profile): + elif self._has_static_credentials( + profile + ) or not self._has_assume_role_config_vars(profile): profile_providers = self._profile_provider_builder.providers( profile_name=profile_name, disable_env_vars=True, @@ -1638,23 +1735,26 @@ return Credentials( access_key=profile['aws_access_key_id'], secret_key=profile['aws_secret_access_key'], - token=profile.get('aws_session_token') + token=profile.get('aws_session_token'), ) except KeyError as e: raise PartialCredentialsError( - provider=self.METHOD, cred_var=str(e)) + provider=self.METHOD, cred_var=str(e) + ) - def _resolve_credentials_from_source(self, credential_source, - profile_name): + def _resolve_credentials_from_source( + self, credential_source, profile_name + ): credentials = self._credential_sourcer.source_credentials( - credential_source) + credential_source + ) if credentials is None: raise CredentialRetrievalError( provider=credential_source, error_msg=( 'No credentials found in credential_source referenced ' 'in profile %s' % profile_name - ) + ), ) return credentials @@ -1669,13 +1769,13 @@ class AssumeRoleWithWebIdentityProvider(CredentialProvider): } def __init__( - self, - load_config, - client_creator, - profile_name, - cache=None, - disable_env_vars=False, - token_loader_cls=None, + self, + load_config, + client_creator, + profile_name, + cache=None, + disable_env_vars=False, + token_loader_cls=None, ): self.cache = cache self._load_config = load_config @@ -1748,7 +1848,7 @@ ) -class CanonicalNameCredentialSourcer(object): +class CanonicalNameCredentialSourcer: def __init__(self, providers): self._providers = providers @@ -1877,20 +1977,21 @@ class ContainerProvider(CredentialProvider): def _build_headers(self): auth_token = self._environ.get(self.ENV_VAR_AUTH_TOKEN) if auth_token is not None: - return { - 'Authorization': auth_token - } + return {'Authorization': auth_token} def _create_fetcher(self, full_uri, headers): def fetch_creds(): try: response = self._fetcher.retrieve_full_uri( - full_uri,
headers=headers + ) except MetadataRetrievalError as e: - logger.debug("Error retrieving container metadata: %s", e, - exc_info=True) - raise CredentialRetrievalError(provider=self.METHOD, - error_msg=str(e)) + logger.debug( + "Error retrieving container metadata: %s", e, exc_info=True + ) + raise CredentialRetrievalError( + provider=self.METHOD, error_msg=str(e) + ) return { 'access_key': response['AccessKeyId'], 'secret_key': response['SecretAccessKey'], @@ -1904,7 +2005,7 @@ class ContainerProvider(CredentialProvider): return self.ENV_VAR in self._environ -class CredentialResolver(object): +class CredentialResolver: def __init__(self, providers): """ @@ -2007,18 +2108,24 @@ class CredentialResolver(object): class SSOCredentialFetcher(CachedCredentialFetcher): _UTC_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' - def __init__(self, start_url, sso_region, role_name, account_id, - client_creator, token_loader=None, cache=None, - expiry_window_seconds=None): + def __init__( + self, + start_url, + sso_region, + role_name, + account_id, + client_creator, + token_loader=None, + cache=None, + expiry_window_seconds=None, + ): self._client_creator = client_creator self._sso_region = sso_region self._role_name = role_name self._account_id = account_id self._start_url = start_url self._token_loader = token_loader - super(SSOCredentialFetcher, self).__init__( - cache, expiry_window_seconds - ) + super().__init__(cache, expiry_window_seconds) def _create_cache_key(self): """Create a predictable cache key for the current configuration. @@ -2071,7 +2178,7 @@ class SSOCredentialFetcher(CachedCredentialFetcher): 'SecretAccessKey': credentials['secretAccessKey'], 'SessionToken': credentials['sessionToken'], 'Expiration': self._parse_timestamp(credentials['expiration']), - } + }, } return credentials @@ -2089,8 +2196,14 @@ class SSOProvider(CredentialProvider): 'sso_account_id', ] - def __init__(self, load_config, client_creator, profile_name, - cache=None, token_cache=None): + def __init__( + self, + load_config, + client_creator, + profile_name, + cache=None, + token_cache=None, + ): if token_cache is None: token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR) self._token_cache = token_cache diff --git a/contrib/python/botocore/py3/botocore/crt/auth.py b/contrib/python/botocore/py3/botocore/crt/auth.py index a776bc1e66..1fd5567505 100644 --- a/contrib/python/botocore/py3/botocore/crt/auth.py +++ b/contrib/python/botocore/py3/botocore/crt/auth.py @@ -44,7 +44,8 @@ class CrtSigV4Auth(BaseSigner): # Use utcnow() because that's what gets mocked by tests, but set # timezone because CRT assumes naive datetime is local time. 
datetime_now = datetime.datetime.utcnow().replace( - tzinfo=datetime.timezone.utc) + tzinfo=datetime.timezone.utc + ) # Use existing 'X-Amz-Content-SHA256' header if able existing_sha256 = self._get_existing_sha256(request) @@ -54,7 +55,8 @@ class CrtSigV4Auth(BaseSigner): credentials_provider = awscrt.auth.AwsCredentialsProvider.new_static( access_key_id=self.credentials.access_key, secret_access_key=self.credentials.secret_key, - session_token=self.credentials.token) + session_token=self.credentials.token, + ) if self._is_streaming_checksum_payload(request): explicit_payload = STREAMING_UNSIGNED_PAYLOAD_TRAILER @@ -67,8 +69,9 @@ class CrtSigV4Auth(BaseSigner): explicit_payload = UNSIGNED_PAYLOAD if self._should_add_content_sha256_header(explicit_payload): - body_header = \ + body_header = ( awscrt.auth.AwsSignedBodyHeaderType.X_AMZ_CONTENT_SHA_256 + ) else: body_header = awscrt.auth.AwsSignedBodyHeaderType.NONE @@ -98,10 +101,10 @@ class CrtSigV4Auth(BaseSigner): array = [] for (param, value) in aws_request.params.items(): value = str(value) - array.append('%s=%s' % (param, value)) + array.append(f'{param}={value}') crt_path = crt_path + '?' + '&'.join(array) elif url_parts.query: - crt_path = '%s?%s' % (crt_path, url_parts.query) + crt_path = f'{crt_path}?{url_parts.query}' crt_headers = awscrt.http.HttpHeaders(aws_request.headers.items()) @@ -117,13 +120,15 @@ class CrtSigV4Auth(BaseSigner): method=aws_request.method, path=crt_path, headers=crt_headers, - body_stream=crt_body_stream) + body_stream=crt_body_stream, + ) return crt_request def _apply_signing_changes(self, aws_request, signed_crt_request): # Apply changes from signed CRT request to the AWSRequest aws_request.headers = HTTPHeaders.from_pairs( - list(signed_crt_request.headers)) + list(signed_crt_request.headers) + ) def _should_sign_header(self, name, **kwargs): return name.lower() not in SIGNED_HEADERS_BLACKLIST @@ -192,8 +197,10 @@ class CrtS3SigV4Auth(CrtSigV4Auth): algorithm = checksum_context.get('request_algorithm') if isinstance(algorithm, dict) and algorithm.get('in') == 'header': checksum_header = algorithm['name'] - if not request.url.startswith('https') or \ - checksum_header not in request.headers: + if ( + not request.url.startswith('https') + or checksum_header not in request.headers + ): return True # If the input is streaming we disable body signing by default. @@ -234,7 +241,8 @@ class CrtSigV4AsymAuth(BaseSigner): # Use utcnow() because that's what gets mocked by tests, but set # timezone because CRT assumes naive datetime is local time. 
datetime_now = datetime.datetime.utcnow().replace( - tzinfo=datetime.timezone.utc) + tzinfo=datetime.timezone.utc + ) # Use existing 'X-Amz-Content-SHA256' header if able existing_sha256 = self._get_existing_sha256(request) @@ -244,7 +252,8 @@ class CrtSigV4AsymAuth(BaseSigner): credentials_provider = awscrt.auth.AwsCredentialsProvider.new_static( access_key_id=self.credentials.access_key, secret_access_key=self.credentials.secret_key, - session_token=self.credentials.token) + session_token=self.credentials.token, + ) if self._is_streaming_checksum_payload(request): explicit_payload = STREAMING_UNSIGNED_PAYLOAD_TRAILER @@ -257,8 +266,9 @@ class CrtSigV4AsymAuth(BaseSigner): explicit_payload = UNSIGNED_PAYLOAD if self._should_add_content_sha256_header(explicit_payload): - body_header = \ + body_header = ( awscrt.auth.AwsSignedBodyHeaderType.X_AMZ_CONTENT_SHA_256 + ) else: body_header = awscrt.auth.AwsSignedBodyHeaderType.NONE @@ -288,10 +298,10 @@ class CrtSigV4AsymAuth(BaseSigner): array = [] for (param, value) in aws_request.params.items(): value = str(value) - array.append('%s=%s' % (param, value)) + array.append(f'{param}={value}') crt_path = crt_path + '?' + '&'.join(array) elif url_parts.query: - crt_path = '%s?%s' % (crt_path, url_parts.query) + crt_path = f'{crt_path}?{url_parts.query}' crt_headers = awscrt.http.HttpHeaders(aws_request.headers.items()) @@ -307,13 +317,15 @@ class CrtSigV4AsymAuth(BaseSigner): method=aws_request.method, path=crt_path, headers=crt_headers, - body_stream=crt_body_stream) + body_stream=crt_body_stream, + ) return crt_request def _apply_signing_changes(self, aws_request, signed_crt_request): # Apply changes from signed CRT request to the AWSRequest aws_request.headers = HTTPHeaders.from_pairs( - list(signed_crt_request.headers)) + list(signed_crt_request.headers) + ) def _should_sign_header(self, name, **kwargs): return name.lower() not in SIGNED_HEADERS_BLACKLIST @@ -382,8 +394,10 @@ class CrtS3SigV4AsymAuth(CrtSigV4AsymAuth): # to implicitly disable body signing. The combination of TLS and # content-md5 is sufficiently secure and durable for us to be # confident in the request without body signing. - if not request.url.startswith('https') or \ - 'Content-MD5' not in request.headers: + if ( + not request.url.startswith('https') + or 'Content-MD5' not in request.headers + ): return True # If the input is streaming we disable body signing by default. @@ -403,8 +417,9 @@ class CrtSigV4AsymQueryAuth(CrtSigV4AsymAuth): DEFAULT_EXPIRES = 3600 _SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS - def __init__(self, credentials, service_name, region_name, - expires=DEFAULT_EXPIRES): + def __init__( + self, credentials, service_name, region_name, expires=DEFAULT_EXPIRES + ): super().__init__(credentials, service_name, region_name) self._expiration_in_seconds = expires @@ -423,9 +438,9 @@ class CrtSigV4AsymQueryAuth(CrtSigV4AsymAuth): # parse_qs makes each value a list, but in our case we know we won't # have repeated keys so we know we have single element lists which we # can convert back to scalar values. - query_dict = dict( - [(k, v[0]) for k, v in - parse_qs(url_parts.query, keep_blank_values=True).items()]) + query_string_parts = parse_qs(url_parts.query, keep_blank_values=True) + query_dict = {k: v[0] for k, v in query_string_parts.items()} + # The spec is particular about this. 
It *has* to be: # https://<endpoint>?<operation params>&<auth params> # You can't mix the two types of params together, i.e just keep doing @@ -493,8 +508,9 @@ class CrtSigV4QueryAuth(CrtSigV4Auth): DEFAULT_EXPIRES = 3600 _SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS - def __init__(self, credentials, service_name, region_name, - expires=DEFAULT_EXPIRES): + def __init__( + self, credentials, service_name, region_name, expires=DEFAULT_EXPIRES + ): super().__init__(credentials, service_name, region_name) self._expiration_in_seconds = expires @@ -513,9 +529,12 @@ class CrtSigV4QueryAuth(CrtSigV4Auth): # parse_qs makes each value a list, but in our case we know we won't # have repeated keys so we know we have single element lists which we # can convert back to scalar values. - query_dict = dict( - [(k, v[0]) for k, v in - parse_qs(url_parts.query, keep_blank_values=True).items()]) + query_dict = { + k: v[0] + for k, v in parse_qs( + url_parts.query, keep_blank_values=True + ).items() + } if request.params: query_dict.update(request.params) request.params = {} @@ -593,5 +612,5 @@ CRT_AUTH_TYPE_MAPS = { 's3v4': CrtS3SigV4Auth, 's3v4-query': CrtS3SigV4QueryAuth, 's3v4a': CrtS3SigV4AsymAuth, - 's3v4a-query': CrtS3SigV4AsymQueryAuth + 's3v4a-query': CrtS3SigV4AsymQueryAuth, } diff --git a/contrib/python/botocore/py3/botocore/data/compute-optimizer/2019-11-01/service-2.json b/contrib/python/botocore/py3/botocore/data/compute-optimizer/2019-11-01/service-2.json index 6493799b2f..5ceab003f4 100644 --- a/contrib/python/botocore/py3/botocore/data/compute-optimizer/2019-11-01/service-2.json +++ b/contrib/python/botocore/py3/botocore/data/compute-optimizer/2019-11-01/service-2.json @@ -589,7 +589,7 @@ "members":{ "resourceType":{ "shape":"ResourceType", - "documentation":"<p>The target resource type of the recommendation preference to delete.</p> <p>The <code>Ec2Instance</code> option encompasses standalone instances and instances that are part of Auto Scaling groups. The <code>AutoScalingGroup</code> option encompasses only instances that are part of an Auto Scaling group.</p>" + "documentation":"<p>The target resource type of the recommendation preference to delete.</p> <p>The <code>Ec2Instance</code> option encompasses standalone instances and instances that are part of Auto Scaling groups. The <code>AutoScalingGroup</code> option encompasses only instances that are part of an Auto Scaling group.</p> <note> <p>The valid values for this parameter are <code>Ec2Instance</code> and <code>AutoScalingGroup</code>.</p> </note>" }, "scope":{ "shape":"Scope", @@ -1522,7 +1522,7 @@ "members":{ "resourceType":{ "shape":"ResourceType", - "documentation":"<p>The target resource type of the recommendation preference for which to return preferences.</p> <p>The <code>Ec2Instance</code> option encompasses standalone instances and instances that are part of Auto Scaling groups. The <code>AutoScalingGroup</code> option encompasses only instances that are part of an Auto Scaling group.</p>" + "documentation":"<p>The target resource type of the recommendation preference for which to return preferences.</p> <p>The <code>Ec2Instance</code> option encompasses standalone instances and instances that are part of Auto Scaling groups. 
The <code>AutoScalingGroup</code> option encompasses only instances that are part of an Auto Scaling group.</p> <note> <p>The valid values for this parameter are <code>Ec2Instance</code> and <code>AutoScalingGroup</code>.</p> </note>" }, "scope":{ "shape":"Scope", @@ -2134,7 +2134,7 @@ "members":{ "resourceType":{ "shape":"ResourceType", - "documentation":"<p>The target resource type of the recommendation preference to create.</p> <p>The <code>Ec2Instance</code> option encompasses standalone instances and instances that are part of Auto Scaling groups. The <code>AutoScalingGroup</code> option encompasses only instances that are part of an Auto Scaling group.</p>" + "documentation":"<p>The target resource type of the recommendation preference to create.</p> <p>The <code>Ec2Instance</code> option encompasses standalone instances and instances that are part of Auto Scaling groups. The <code>AutoScalingGroup</code> option encompasses only instances that are part of an Auto Scaling group.</p> <note> <p>The valid values for this parameter are <code>Ec2Instance</code> and <code>AutoScalingGroup</code>.</p> </note>" }, "scope":{ "shape":"Scope", diff --git a/contrib/python/botocore/py3/botocore/data/ec2/2016-11-15/service-2.json b/contrib/python/botocore/py3/botocore/data/ec2/2016-11-15/service-2.json index c7f9f448cc..eb90b2c6d5 100644 --- a/contrib/python/botocore/py3/botocore/data/ec2/2016-11-15/service-2.json +++ b/contrib/python/botocore/py3/botocore/data/ec2/2016-11-15/service-2.json @@ -3706,6 +3706,16 @@ "output":{"shape":"GetInstanceTypesFromInstanceRequirementsResult"}, "documentation":"<p>Returns a list of instance types with the specified instance attributes. You can use the response to preview the instance types without launching instances. Note that the response does not consider capacity.</p> <p>When you specify multiple parameters, you get instance types that satisfy all of the specified parameters. If you specify multiple values for a parameter, you get instance types that satisfy any of the specified values.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html#spotfleet-get-instance-types-from-instance-requirements\">Preview instance types with specified attributes</a>, <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html\">Attribute-based instance type selection for EC2 Fleet</a>, <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html\">Attribute-based instance type selection for Spot Fleet</a>, and <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html\">Spot placement score</a> in the <i>Amazon EC2 User Guide</i>, and <a href=\"https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html\">Creating an Auto Scaling group using attribute-based instance type selection</a> in the <i>Amazon EC2 Auto Scaling User Guide</i>.</p>" }, + "GetInstanceUefiData":{ + "name":"GetInstanceUefiData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetInstanceUefiDataRequest"}, + "output":{"shape":"GetInstanceUefiDataResult"}, + "documentation":"<p>A binary representation of the UEFI variable store. Only non-volatile variables are stored. 
This is a base64 encoded and zlib compressed binary value that must be properly encoded.</p> <p>When you use <a href=\"https://docs.aws.amazon.com/cli/latest/reference/ec2/register-image.html\">register-image</a> to create an AMI, you can create an exact copy of your variable store by passing the UEFI data in the <code>UefiData</code> parameter. You can modify the UEFI data by using the <a href=\"https://github.com/awslabs/python-uefivars\">python-uefivars tool</a> on GitHub. You can use the tool to convert the UEFI data into a human-readable format (JSON), which you can inspect and modify, and then convert back into the binary format to use with register-image.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/uefi-secure-boot.html\">UEFI Secure Boot</a> in the <i>Amazon EC2 User Guide</i>.</p>" + }, "GetIpamAddressHistory":{ "name":"GetIpamAddressHistory", "http":{ @@ -25952,6 +25962,35 @@ } } }, + "GetInstanceUefiDataRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"<p>The ID of the instance from which to retrieve the UEFI data.</p>" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"<p>Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is <code>DryRunOperation</code>. Otherwise, it is <code>UnauthorizedOperation</code>.</p>" + } + } + }, + "GetInstanceUefiDataResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"<p>The ID of the instance from which to retrieve the UEFI data.</p>", + "locationName":"instanceId" + }, + "UefiData":{ + "shape":"String", + "documentation":"<p>Base64 representation of the non-volatile UEFI variable store.</p>", + "locationName":"uefiData" + } + } + }, "GetIpamAddressHistoryRequest":{ "type":"structure", "required":[ @@ -27689,6 +27728,11 @@ "documentation":"<p>The boot mode of the image. For more information, see <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html\">Boot modes</a> in the <i>Amazon Elastic Compute Cloud User Guide</i>.</p>", "locationName":"bootMode" }, + "TpmSupport":{ + "shape":"TpmSupportValues", + "documentation":"<p>If the image is configured for NitroTPM support, the value is <code>v2.0</code>. For more information, see <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html\">NitroTPM</a> in the <i>Amazon Elastic Compute Cloud User Guide</i>.</p>", + "locationName":"tpmSupport" + }, "DeprecationTime":{ "shape":"String", "documentation":"<p>The date and time to deprecate the AMI, in UTC, in the following format: <i>YYYY</i>-<i>MM</i>-<i>DD</i>T<i>HH</i>:<i>MM</i>:<i>SS</i>Z. If you specified a value for seconds, Amazon EC2 rounds the seconds to the nearest minute.</p>", @@ -27745,6 +27789,16 @@ "documentation":"<p>The boot mode.</p>", "locationName":"bootMode" }, + "TpmSupport":{ + "shape":"AttributeValue", + "documentation":"<p>If the image is configured for NitroTPM support, the value is <code>v2.0</code>.</p>", + "locationName":"tpmSupport" + }, + "UefiData":{ + "shape":"AttributeValue", + "documentation":"<p>Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data, use the <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceUefiData\">GetInstanceUefiData</a> command. 
You can inspect and modify the UEFI data by using the <a href=\"https://github.com/awslabs/python-uefivars\">python-uefivars tool</a> on GitHub. For more information, see <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/uefi-secure-boot.html\">UEFI Secure Boot</a> in the <i>Amazon Elastic Compute Cloud User Guide</i>.</p>", + "locationName":"uefiData" + }, "LastLaunchedTime":{ "shape":"AttributeValue", "documentation":"<p>The date and time, in <a href=\"http://www.iso.org/iso/iso8601\">ISO 8601 date-time format</a>, when the AMI was last used to launch an EC2 instance. When the AMI is used, there is a 24-hour delay before that usage is reported.</p> <note> <p> <code>lastLaunchedTime</code> data is available starting April 2017.</p> </note>", @@ -27764,6 +27818,8 @@ "blockDeviceMapping", "sriovNetSupport", "bootMode", + "tpmSupport", + "uefiData", "lastLaunchedTime" ] }, @@ -28947,6 +29003,11 @@ "documentation":"<p>The IPv6 address assigned to the instance.</p>", "locationName":"ipv6Address" }, + "TpmSupport":{ + "shape":"String", + "documentation":"<p>If the instance is configured for NitroTPM support, the value is <code>v2.0</code>. For more information, see <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html\">NitroTPM</a> in the <i>Amazon EC2 User Guide</i>.</p>", + "locationName":"tpmSupport" + }, "MaintenanceOptions":{ "shape":"InstanceMaintenanceOptions", "documentation":"<p>Provides information on the recovery and maintenance options of your instance.</p>", @@ -40427,6 +40488,14 @@ "BootMode":{ "shape":"BootModeValues", "documentation":"<p>The boot mode of the AMI. For more information, see <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html\">Boot modes</a> in the <i>Amazon Elastic Compute Cloud User Guide</i>.</p>" + }, + "TpmSupport":{ + "shape":"TpmSupportValues", + "documentation":"<p>Set to <code>v2.0</code> to enable Trusted Platform Module (TPM) support. For more information, see <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html\">NitroTPM</a> in the <i>Amazon Elastic Compute Cloud User Guide</i>.</p>" + }, + "UefiData":{ + "shape":"StringType", + "documentation":"<p>Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data, use the <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceUefiData\">GetInstanceUefiData</a> command. You can inspect and modify the UEFI data by using the <a href=\"https://github.com/awslabs/python-uefivars\">python-uefivars tool</a> on GitHub. 
For more information, see <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/uefi-secure-boot.html\">UEFI Secure Boot</a> in the <i>Amazon Elastic Compute Cloud User Guide</i>.</p>" } }, "documentation":"<p>Contains the parameters for RegisterImage.</p>" @@ -46383,6 +46452,11 @@ "locationName":"item" } }, + "StringType":{ + "type":"string", + "max":64000, + "min":0 + }, "Subnet":{ "type":"structure", "members":{ @@ -47180,6 +47254,10 @@ }, "documentation":"<p>The minimum and maximum amount of total local storage, in GB.</p>" }, + "TpmSupportValues":{ + "type":"string", + "enum":["v2.0"] + }, "TrafficDirection":{ "type":"string", "enum":[ diff --git a/contrib/python/botocore/py3/botocore/data/eks/2017-11-01/service-2.json b/contrib/python/botocore/py3/botocore/data/eks/2017-11-01/service-2.json index 9b95ac5968..be459034ad 100644 --- a/contrib/python/botocore/py3/botocore/data/eks/2017-11-01/service-2.json +++ b/contrib/python/botocore/py3/botocore/data/eks/2017-11-01/service-2.json @@ -321,7 +321,7 @@ {"shape":"ServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"<p>Returns descriptive information about an update against your Amazon EKS cluster or associated managed node group.</p> <p>When the status of the update is <code>Succeeded</code>, the update is complete. If an update fails, the status is <code>Failed</code>, and an error detail explains the reason for the failure.</p>" + "documentation":"<p>Returns descriptive information about an update against your Amazon EKS cluster or associated managed node group or Amazon EKS add-on.</p> <p>When the status of the update is <code>Succeeded</code>, the update is complete. If an update fails, the status is <code>Failed</code>, and an error detail explains the reason for the failure.</p>" }, "DisassociateIdentityProviderConfig":{ "name":"DisassociateIdentityProviderConfig", @@ -602,7 +602,9 @@ "AL2_ARM_64", "CUSTOM", "BOTTLEROCKET_ARM_64", - "BOTTLEROCKET_x86_64" + "BOTTLEROCKET_x86_64", + "BOTTLEROCKET_ARM_64_NVIDIA", + "BOTTLEROCKET_x86_64_NVIDIA" ] }, "AccessDeniedException":{ @@ -1249,11 +1251,11 @@ }, "diskSize":{ "shape":"BoxedInteger", - "documentation":"<p>The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB. If you specify <code>launchTemplate</code>, then don't specify <code>diskSize</code>, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the Amazon EKS User Guide.</p>" + "documentation":"<p>The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB. If you specify <code>launchTemplate</code>, then don't specify <code>diskSize</code>, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the <i>Amazon EKS User Guide</i>.</p>" }, "subnets":{ "shape":"StringList", - "documentation":"<p>The subnets to use for the Auto Scaling group that is created for your node group. If you specify <code>launchTemplate</code>, then don't specify <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html\"> <code>SubnetId</code> </a> in your launch template, or the node group deployment will fail. 
For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the Amazon EKS User Guide.</p>" + "documentation":"<p>The subnets to use for the Auto Scaling group that is created for your node group. If you specify <code>launchTemplate</code>, then don't specify <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html\"> <code>SubnetId</code> </a> in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the <i>Amazon EKS User Guide</i>.</p>" }, "instanceTypes":{ "shape":"StringList", @@ -1261,15 +1263,15 @@ }, "amiType":{ "shape":"AMITypes", - "documentation":"<p>The AMI type for your node group. GPU instance types should use the <code>AL2_x86_64_GPU</code> AMI type. Non-GPU instances should use the <code>AL2_x86_64</code> AMI type. Arm instances should use the <code>AL2_ARM_64</code> AMI type. All types use the Amazon EKS optimized Amazon Linux 2 AMI. If you specify <code>launchTemplate</code>, and your launch template uses a custom AMI, then don't specify <code>amiType</code>, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the Amazon EKS User Guide.</p>" + "documentation":"<p>The AMI type for your node group. GPU instance types should use the <code>AL2_x86_64_GPU</code> AMI type. Non-GPU instances should use the <code>AL2_x86_64</code> AMI type. Arm instances should use the <code>AL2_ARM_64</code> AMI type. All types use the Amazon EKS optimized Amazon Linux 2 AMI. If you specify <code>launchTemplate</code>, and your launch template uses a custom AMI, then don't specify <code>amiType</code>, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the <i>Amazon EKS User Guide</i>.</p>" }, "remoteAccess":{ "shape":"RemoteAccessConfig", - "documentation":"<p>The remote access (SSH) configuration to use with your node group. If you specify <code>launchTemplate</code>, then don't specify <code>remoteAccess</code>, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the Amazon EKS User Guide.</p>" + "documentation":"<p>The remote access (SSH) configuration to use with your node group. If you specify <code>launchTemplate</code>, then don't specify <code>remoteAccess</code>, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the <i>Amazon EKS User Guide</i>.</p>" }, "nodeRole":{ "shape":"String", - "documentation":"<p>The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node <code>kubelet</code> daemon makes calls to Amazon Web Services APIs on your behalf. 
Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/worker_node_IAM_role.html\">Amazon EKS node IAM role</a> in the <i> <i>Amazon EKS User Guide</i> </i>. If you specify <code>launchTemplate</code>, then don't specify <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html\"> <code>IamInstanceProfile</code> </a> in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the Amazon EKS User Guide.</p>" + "documentation":"<p>The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node <code>kubelet</code> daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html\">Amazon EKS node IAM role</a> in the <i> <i>Amazon EKS User Guide</i> </i>. If you specify <code>launchTemplate</code>, then don't specify <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html\"> <code>IamInstanceProfile</code> </a> in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the <i>Amazon EKS User Guide</i>.</p>" }, "labels":{ "shape":"labelsMap", @@ -1277,7 +1279,7 @@ }, "taints":{ "shape":"taintsList", - "documentation":"<p>The Kubernetes taints to be applied to the nodes in the node group.</p>" + "documentation":"<p>The Kubernetes taints to be applied to the nodes in the node group. For more information, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/node-taints-managed-node-groups.html\">Node taints on managed node groups</a>.</p>" }, "tags":{ "shape":"TagMap", @@ -1302,11 +1304,11 @@ }, "version":{ "shape":"String", - "documentation":"<p>The Kubernetes version to use for your managed nodes. By default, the Kubernetes version of the cluster is used, and this is the only accepted specified value. If you specify <code>launchTemplate</code>, and your launch template uses a custom AMI, then don't specify <code>version</code>, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the Amazon EKS User Guide.</p>" + "documentation":"<p>The Kubernetes version to use for your managed nodes. By default, the Kubernetes version of the cluster is used, and this is the only accepted specified value. If you specify <code>launchTemplate</code>, and your launch template uses a custom AMI, then don't specify <code>version</code>, or the node group deployment will fail. 
For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the <i>Amazon EKS User Guide</i>.</p>" }, "releaseVersion":{ "shape":"String", - "documentation":"<p>The AMI version of the Amazon EKS optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For more information, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html\">Amazon EKS optimized Amazon Linux 2 AMI versions</a> in the <i>Amazon EKS User Guide</i>. If you specify <code>launchTemplate</code>, and your launch template uses a custom AMI, then don't specify <code>releaseVersion</code>, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the Amazon EKS User Guide.</p>" + "documentation":"<p>The AMI version of the Amazon EKS optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For more information, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html\">Amazon EKS optimized Amazon Linux 2 AMI versions</a> in the <i>Amazon EKS User Guide</i>. If you specify <code>launchTemplate</code>, and your launch template uses a custom AMI, then don't specify <code>releaseVersion</code>, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the <i>Amazon EKS User Guide</i>.</p>" } } }, @@ -1656,13 +1658,13 @@ }, "nodegroupName":{ "shape":"String", - "documentation":"<p>The name of the Amazon EKS node group associated with the update.</p>", + "documentation":"<p>The name of the Amazon EKS node group associated with the update. This parameter is required if the update is a node group update.</p>", "location":"querystring", "locationName":"nodegroupName" }, "addonName":{ "shape":"String", - "documentation":"<p>The name of the add-on. The name must match one of the names returned by <a href=\"https://docs.aws.amazon.com/eks/latest/APIReference/API_ListAddons.html\"> <code>ListAddons</code> </a>.</p>", + "documentation":"<p>The name of the add-on. The name must match one of the names returned by <a href=\"https://docs.aws.amazon.com/eks/latest/APIReference/API_ListAddons.html\"> <code>ListAddons</code> </a>. This parameter is required if the update is an add-on update.</p>", "location":"querystring", "locationName":"addonName" } @@ -1870,7 +1872,7 @@ "members":{ "type":{ "shape":"String", - "documentation":"<p>The type of the identity provider configuration.</p>" + "documentation":"<p>The type of the identity provider configuration. 
The only type available is <code>oidc</code>.</p>" }, "name":{ "shape":"String", @@ -1949,7 +1951,7 @@ "members":{ "code":{ "shape":"NodegroupIssueCode", - "documentation":"<p>A brief description of the error.</p> <ul> <li> <p> <b>AccessDenied</b>: Amazon EKS or one or more of your managed nodes is failing to authenticate or authorize with your Kubernetes cluster API server.</p> </li> <li> <p> <b>AsgInstanceLaunchFailures</b>: Your Auto Scaling group is experiencing failures while attempting to launch instances.</p> </li> <li> <p> <b>AutoScalingGroupNotFound</b>: We couldn't find the Auto Scaling group associated with the managed node group. You may be able to recreate an Auto Scaling group with the same settings to recover.</p> </li> <li> <p> <b>ClusterUnreachable</b>: Amazon EKS or one or more of your managed nodes is unable to to communicate with your Kubernetes cluster API server. This can happen if there are network disruptions or if API servers are timing out processing requests. </p> </li> <li> <p> <b>Ec2LaunchTemplateNotFound</b>: We couldn't find the Amazon EC2 launch template for your managed node group. You may be able to recreate a launch template with the same settings to recover.</p> </li> <li> <p> <b>Ec2LaunchTemplateVersionMismatch</b>: The Amazon EC2 launch template version for your managed node group does not match the version that Amazon EKS created. You may be able to revert to the version that Amazon EKS created to recover.</p> </li> <li> <p> <b>Ec2SecurityGroupDeletionFailure</b>: We could not delete the remote access security group for your managed node group. Remove any dependencies from the security group.</p> </li> <li> <p> <b>Ec2SecurityGroupNotFound</b>: We couldn't find the cluster security group for the cluster. You must recreate your cluster.</p> </li> <li> <p> <b>Ec2SubnetInvalidConfiguration</b>: One or more Amazon EC2 subnets specified for a node group do not automatically assign public IP addresses to instances launched into it. If you want your instances to be assigned a public IP address, then you need to enable the <code>auto-assign public IP address</code> setting for the subnet. See <a href=\"https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html#subnet-public-ip\">Modifying the public IPv4 addressing attribute for your subnet</a> in the Amazon VPC User Guide.</p> </li> <li> <p> <b>IamInstanceProfileNotFound</b>: We couldn't find the IAM instance profile for your managed node group. You may be able to recreate an instance profile with the same settings to recover.</p> </li> <li> <p> <b>IamNodeRoleNotFound</b>: We couldn't find the IAM role for your managed node group. You may be able to recreate an IAM role with the same settings to recover.</p> </li> <li> <p> <b>InstanceLimitExceeded</b>: Your Amazon Web Services account is unable to launch any more instances of the specified instance type. You may be able to request an Amazon EC2 instance limit increase to recover.</p> </li> <li> <p> <b>InsufficientFreeAddresses</b>: One or more of the subnets associated with your managed node group does not have enough available IP addresses for new nodes.</p> </li> <li> <p> <b>InternalFailure</b>: These errors are usually caused by an Amazon EKS server-side issue.</p> </li> <li> <p> <b>NodeCreationFailure</b>: Your launched instances are unable to register with your Amazon EKS cluster. 
Common causes of this failure are insufficient <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html\">node IAM role</a> permissions or lack of outbound internet access for the nodes. </p> </li> </ul>" + "documentation":"<p>A brief description of the error.</p> <ul> <li> <p> <b>AccessDenied</b>: Amazon EKS or one or more of your managed nodes is failing to authenticate or authorize with your Kubernetes cluster API server.</p> </li> <li> <p> <b>AsgInstanceLaunchFailures</b>: Your Auto Scaling group is experiencing failures while attempting to launch instances.</p> </li> <li> <p> <b>AutoScalingGroupNotFound</b>: We couldn't find the Auto Scaling group associated with the managed node group. You may be able to recreate an Auto Scaling group with the same settings to recover.</p> </li> <li> <p> <b>ClusterUnreachable</b>: Amazon EKS or one or more of your managed nodes is unable to communicate with your Kubernetes cluster API server. This can happen if there are network disruptions or if API servers are timing out processing requests. </p> </li> <li> <p> <b>Ec2LaunchTemplateNotFound</b>: We couldn't find the Amazon EC2 launch template for your managed node group. You may be able to recreate a launch template with the same settings to recover.</p> </li> <li> <p> <b>Ec2LaunchTemplateVersionMismatch</b>: The Amazon EC2 launch template version for your managed node group does not match the version that Amazon EKS created. You may be able to revert to the version that Amazon EKS created to recover.</p> </li> <li> <p> <b>Ec2SecurityGroupDeletionFailure</b>: We could not delete the remote access security group for your managed node group. Remove any dependencies from the security group.</p> </li> <li> <p> <b>Ec2SecurityGroupNotFound</b>: We couldn't find the cluster security group for the cluster. You must recreate your cluster.</p> </li> <li> <p> <b>Ec2SubnetInvalidConfiguration</b>: One or more Amazon EC2 subnets specified for a node group do not automatically assign public IP addresses to instances launched into them. If you want your instances to be assigned a public IP address, then you need to enable the <code>auto-assign public IP address</code> setting for the subnet. See <a href=\"https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html#subnet-public-ip\">Modifying the public IPv4 addressing attribute for your subnet</a> in the <i>Amazon VPC User Guide</i>.</p> </li> <li> <p> <b>IamInstanceProfileNotFound</b>: We couldn't find the IAM instance profile for your managed node group. You may be able to recreate an instance profile with the same settings to recover.</p> </li> <li> <p> <b>IamNodeRoleNotFound</b>: We couldn't find the IAM role for your managed node group. You may be able to recreate an IAM role with the same settings to recover.</p> </li> <li> <p> <b>InstanceLimitExceeded</b>: Your Amazon Web Services account is unable to launch any more instances of the specified instance type. You may be able to request an Amazon EC2 instance limit increase to recover.</p> </li> <li> <p> <b>InsufficientFreeAddresses</b>: One or more of the subnets associated with your managed node group does not have enough available IP addresses for new nodes.</p> </li> <li> <p> <b>InternalFailure</b>: These errors are usually caused by an Amazon EKS server-side issue.</p> </li> <li> <p> <b>NodeCreationFailure</b>: Your launched instances are unable to register with your Amazon EKS cluster. 
Common causes of this failure are insufficient <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html\">node IAM role</a> permissions or lack of outbound internet access for the nodes. </p> </li> </ul>" }, "message":{ "shape":"String", @@ -1975,7 +1977,7 @@ }, "ipFamily":{ "shape":"IpFamily", - "documentation":"<p>Specify which IP version is used to assign Kubernetes Pod and Service IP addresses. If you don't specify a value, <code>ipv4</code> is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify <code>ipv6</code>, the VPC and subnets that you specify for cluster creation must have both IPv4 and IPv6 CIDR blocks assigned to them. </p> <p>You can only specify <code>ipv6</code> for 1.21 and later clusters that use version 1.10.0 or later of the Amazon VPC CNI add-on. If you specify <code>ipv6</code>, then ensure that your VPC meets the requirements and that you're familiar with the considerations listed in <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html\">Assigning IPv6 addresses to Pods and Services</a> in the Amazon EKS User Guide. If you specify <code>ipv6</code>, Kubernetes assigns Service and Pod addresses from the unique local address range (fc00::/7). You can't specify a custom IPv6 CIDR block.</p>" + "documentation":"<p>Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you don't specify a value, <code>ipv4</code> is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify <code>ipv6</code>, the VPC and subnets that you specify for cluster creation must have both IPv4 and IPv6 CIDR blocks assigned to them. You can't specify <code>ipv6</code> for clusters in China Regions.</p> <p>You can only specify <code>ipv6</code> for 1.21 and later clusters that use version 1.10.1 or later of the Amazon VPC CNI add-on. If you specify <code>ipv6</code>, then ensure that your VPC meets the requirements listed in the considerations listed in <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html\">Assigning IPv6 addresses to pods and services</a> in the Amazon EKS User Guide. Kubernetes assigns services IPv6 addresses from the unique local address range (fc00::/7). You can't specify a custom IPv6 CIDR block. Pod addresses are assigned from the subnet's IPv6 CIDR.</p>" } }, "documentation":"<p>The Kubernetes network configuration for the cluster.</p>" @@ -1985,15 +1987,15 @@ "members":{ "serviceIpv4Cidr":{ "shape":"String", - "documentation":"<p>The CIDR block that Kubernetes Pod and Service IP addresses are assigned from. Kubernetes assigns addresses from an IPv4 CIDR block assigned to a subnet that the node is in. If you didn't specify a CIDR block when you created the cluster, then Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. If this was specified, then it was specified when the cluster was created and it can't be changed.</p>" + "documentation":"<p>The CIDR block that Kubernetes pod and service IP addresses are assigned from. Kubernetes assigns addresses from an IPv4 CIDR block assigned to a subnet that the node is in. If you didn't specify a CIDR block when you created the cluster, then Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. 
If this was specified, then it was specified when the cluster was created and it can't be changed.</p>" }, "serviceIpv6Cidr":{ "shape":"String", - "documentation":"<p>The CIDR block that Kubernetes Pod and Service IP addresses are assigned from if you created a 1.21 or later cluster with version 1.10.0 or later of the Amazon VPC CNI add-on and specified <code>ipv6</code> for <b>ipFamily</b> when you created the cluster. Kubernetes assigns addresses from the unique local address range (fc00::/7).</p>" + "documentation":"<p>The CIDR block that Kubernetes pod and service IP addresses are assigned from if you created a 1.21 or later cluster with version 1.10.1 or later of the Amazon VPC CNI add-on and specified <code>ipv6</code> for <b>ipFamily</b> when you created the cluster. Kubernetes assigns service addresses from the unique local address range (<code>fc00::/7</code>) because you can't specify a custom IPv6 CIDR block when you create the cluster.</p>" }, "ipFamily":{ "shape":"IpFamily", - "documentation":"<p>The IP family used to assign Kubernetes Pod and Service IP addresses. The IP family is always <code>ipv4</code>, unless you have a <code>1.21</code> or later cluster running version 1.10.0 or later of the Amazon VPC CNI add-on and specified <code>ipv6</code> when you created the cluster. </p>" + "documentation":"<p>The IP family used to assign Kubernetes pod and service IP addresses. The IP family is always <code>ipv4</code>, unless you have a <code>1.21</code> or later cluster running version 1.10.1 or later of the Amazon VPC CNI add-on and specified <code>ipv6</code> when you created the cluster. </p>" } }, "documentation":"<p>The Kubernetes network configuration for the cluster. The response contains a value for <b>serviceIpv6Cidr</b> or <b>serviceIpv4Cidr</b>, but not both. </p>" @@ -2014,7 +2016,7 @@ "documentation":"<p>The ID of the launch template.</p>" } }, - "documentation":"<p>An object representing a node group launch template specification. The launch template cannot include <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html\"> <code>SubnetId</code> </a>, <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html\"> <code>IamInstanceProfile</code> </a>, <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html\"> <code>RequestSpotInstances</code> </a>, <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_HibernationOptionsRequest.html\"> <code>HibernationOptions</code> </a>, or <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TerminateInstances.html\"> <code>TerminateInstances</code> </a>, or the node group deployment or update will fail. For more information about launch templates, see <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html\"> <code>CreateLaunchTemplate</code> </a> in the Amazon EC2 API Reference. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the Amazon EKS User Guide.</p> <p>Specify either <code>name</code> or <code>id</code>, but not both.</p>" + "documentation":"<p>An object representing a node group launch template specification. 
The launch template cannot include <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html\"> <code>SubnetId</code> </a>, <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html\"> <code>IamInstanceProfile</code> </a>, <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html\"> <code>RequestSpotInstances</code> </a>, <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_HibernationOptionsRequest.html\"> <code>HibernationOptions</code> </a>, or <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TerminateInstances.html\"> <code>TerminateInstances</code> </a>, or the node group deployment or update will fail. For more information about launch templates, see <a href=\"https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html\"> <code>CreateLaunchTemplate</code> </a> in the Amazon EC2 API Reference. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the <i>Amazon EKS User Guide</i>.</p> <p>Specify either <code>name</code> or <code>id</code>, but not both.</p>" }, "ListAddonsRequest":{ "type":"structure", @@ -2411,7 +2413,7 @@ }, "taints":{ "shape":"taintsList", - "documentation":"<p>The Kubernetes taints to be applied to the nodes in the node group when they are created. Effect is one of <code>No_Schedule</code>, <code>Prefer_No_Schedule</code>, or <code>No_Execute</code>. Kubernetes taints can be used together with tolerations to control how workloads are scheduled to your nodes.</p>" + "documentation":"<p>The Kubernetes taints to be applied to the nodes in the node group when they are created. Effect is one of <code>No_Schedule</code>, <code>Prefer_No_Schedule</code>, or <code>No_Execute</code>. Kubernetes taints can be used together with tolerations to control how workloads are scheduled to your nodes. For more information, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/node-taints-managed-node-groups.html\">Node taints on managed node groups</a>.</p>" }, "resources":{ "shape":"NodegroupResources", @@ -2784,7 +2786,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"<p>Required resources (such as Service Linked Roles) were created and are still propagating. Retry later.</p>", + "documentation":"<p>Required resources (such as service-linked roles) were created and are still propagating. Retry later.</p>", "error":{"httpStatusCode":428}, "exception":true }, @@ -2889,7 +2891,7 @@ "documentation":"<p>The effect of the taint.</p>" } }, - "documentation":"<p>A property that allows a node to repel a set of pods.</p>" + "documentation":"<p>A property that allows a node to repel a set of pods. For more information, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/node-taints-managed-node-groups.html\">Node taints on managed node groups</a>.</p>" }, "TaintEffect":{ "type":"string", @@ -3121,7 +3123,7 @@ }, "taints":{ "shape":"UpdateTaintsPayload", - "documentation":"<p>The Kubernetes taints to be applied to the nodes in the node group after the update.</p>" + "documentation":"<p>The Kubernetes taints to be applied to the nodes in the node group after the update. 
For more information, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/node-taints-managed-node-groups.html\">Node taints on managed node groups</a>.</p>" }, "scalingConfig":{ "shape":"NodegroupScalingConfig", @@ -3165,11 +3167,11 @@ }, "version":{ "shape":"String", - "documentation":"<p>The Kubernetes version to update to. If no version is specified, then the Kubernetes version of the node group does not change. You can specify the Kubernetes version of the cluster to update the node group to the latest AMI version of the cluster's Kubernetes version. If you specify <code>launchTemplate</code>, and your launch template uses a custom AMI, then don't specify <code>version</code>, or the node group update will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the Amazon EKS User Guide.</p>" + "documentation":"<p>The Kubernetes version to update to. If no version is specified, then the Kubernetes version of the node group does not change. You can specify the Kubernetes version of the cluster to update the node group to the latest AMI version of the cluster's Kubernetes version. If you specify <code>launchTemplate</code>, and your launch template uses a custom AMI, then don't specify <code>version</code>, or the node group update will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the <i>Amazon EKS User Guide</i>.</p>" }, "releaseVersion":{ "shape":"String", - "documentation":"<p>The AMI version of the Amazon EKS optimized AMI to use for the update. By default, the latest available AMI version for the node group's Kubernetes version is used. For more information, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html\">Amazon EKS optimized Amazon Linux 2 AMI versions </a> in the <i>Amazon EKS User Guide</i>. If you specify <code>launchTemplate</code>, and your launch template uses a custom AMI, then don't specify <code>releaseVersion</code>, or the node group update will fail. For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the Amazon EKS User Guide.</p>" + "documentation":"<p>The AMI version of the Amazon EKS optimized AMI to use for the update. By default, the latest available AMI version for the node group's Kubernetes version is used. For more information, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html\">Amazon EKS optimized Amazon Linux 2 AMI versions </a> in the <i>Amazon EKS User Guide</i>. If you specify <code>launchTemplate</code>, and your launch template uses a custom AMI, then don't specify <code>releaseVersion</code>, or the node group update will fail. 
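The taints member described above takes an UpdateTaintsPayload with addOrUpdateTaints and removeTaints lists; at the API level the effect values are the upper-case enum forms (NO_SCHEDULE, NO_EXECUTE, PREFER_NO_SCHEDULE). A hedged sketch of applying a taint through that payload, with the cluster and node group names as placeholders:

    import boto3

    eks = boto3.client('eks', region_name='us-east-1')
    eks.update_nodegroup_config(
        clusterName='example-cluster',      # placeholder
        nodegroupName='example-nodegroup',  # placeholder
        taints={
            'addOrUpdateTaints': [
                # Repel workloads without a matching toleration.
                {'key': 'dedicated', 'value': 'gpu', 'effect': 'NO_SCHEDULE'}
            ]
        },
    )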
For more information about using launch templates with Amazon EKS, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html\">Launch template support</a> in the <i>Amazon EKS User Guide</i>.</p>" }, "launchTemplate":{ "shape":"LaunchTemplateSpecification", @@ -3259,7 +3261,7 @@ "documentation":"<p>Kubernetes taints to be removed.</p>" } }, - "documentation":"<p>An object representing the details of an update to a taints payload.</p>" + "documentation":"<p>An object representing the details of an update to a taints payload. For more information, see <a href=\"https://docs.aws.amazon.com/eks/latest/userguide/node-taints-managed-node-groups.html\">Node taints on managed node groups</a>.</p>" }, "UpdateType":{ "type":"string", diff --git a/contrib/python/botocore/py3/botocore/data/emr/2009-03-31/service-2.json b/contrib/python/botocore/py3/botocore/data/emr/2009-03-31/service-2.json index bad076330a..cbccec30fc 100644 --- a/contrib/python/botocore/py3/botocore/data/emr/2009-03-31/service-2.json +++ b/contrib/python/botocore/py3/botocore/data/emr/2009-03-31/service-2.json @@ -1265,6 +1265,10 @@ "PlacementGroups":{ "shape":"PlacementGroupConfigList", "documentation":"<p>Placement group configured for an Amazon EMR cluster.</p>" + }, + "OSReleaseLabel":{ + "shape":"String", + "documentation":"<p>The Amazon Linux release specified in a cluster launch RunJobFlow request. If no Amazon Linux release was specified, the default Amazon Linux release is shown in the response.</p>" } }, "documentation":"<p>The detailed description of the cluster.</p>" @@ -1773,6 +1777,10 @@ "NextToken":{ "shape":"String", "documentation":"<p>The pagination token. Reserved for future use. Currently set to null.</p>" + }, + "AvailableOSReleases":{ + "shape":"OSReleaseList", + "documentation":"<p>The list of available Amazon Linux release versions for an Amazon EMR release. Contains a Label field that is formatted as shown in <a href=\"https://docs.aws.amazon.com/AL2/latest/relnotes/relnotes-al2.html\"> <i>Amazon Linux 2 Release Notes</i> </a>. For example, <a href=\"https://docs.aws.amazon.com/AL2/latest/relnotes/relnotes-20220218.html\">2.0.20220218.1</a>.</p>" } } }, @@ -2670,6 +2678,10 @@ "shape":"ShrinkPolicy", "documentation":"<p>Policy for customizing shrink operations.</p>" }, + "ReconfigurationType":{ + "shape":"ReconfigurationType", + "documentation":"<p>Type of reconfiguration requested. Valid values are MERGE and OVERWRITE.</p>" + }, "Configurations":{ "shape":"ConfigurationList", "documentation":"<p>A list of new or modified configurations to apply for an instance group.</p>" @@ -3868,6 +3880,20 @@ "type":"list", "member":{"shape":"NotebookExecutionSummary"} }, + "OSRelease":{ + "type":"structure", + "members":{ + "Label":{ + "shape":"String", + "documentation":"<p>The Amazon Linux release specified for a cluster in the RunJobFlow request. The format is as shown in <a href=\"https://docs.aws.amazon.com/AL2/latest/relnotes/relnotes-20220218.html\"> <i>Amazon Linux 2 Release Notes</i> </a>. 
For example, 2.0.20220218.1.</p>" + } + }, + "documentation":"<p>The Amazon Linux release specified for a cluster in the RunJobFlow request.</p>" + }, + "OSReleaseList":{ + "type":"list", + "member":{"shape":"OSRelease"} + }, "OnDemandCapacityReservationOptions":{ "type":"structure", "members":{ @@ -4086,6 +4112,13 @@ "members":{ } }, + "ReconfigurationType":{ + "type":"string", + "enum":[ + "OVERWRITE", + "MERGE" + ] + }, "ReleaseLabelFilter":{ "type":"structure", "members":{ @@ -4299,7 +4332,11 @@ "shape":"PlacementGroupConfigList", "documentation":"<p>The specified placement group configuration for an Amazon EMR cluster.</p>" }, - "AutoTerminationPolicy":{"shape":"AutoTerminationPolicy"} + "AutoTerminationPolicy":{"shape":"AutoTerminationPolicy"}, + "OSReleaseLabel":{ + "shape":"XmlStringMaxLen256", + "documentation":"<p>Specifies a particular Amazon Linux release for all nodes in a cluster launch RunJobFlow request. If a release is not specified, Amazon EMR uses the latest validated Amazon Linux release for cluster launch.</p>" + } }, "documentation":"<p> Input to the <a>RunJobFlow</a> operation. </p>" }, diff --git a/contrib/python/botocore/py3/botocore/data/migration-hub-refactor-spaces/2021-10-26/service-2.json b/contrib/python/botocore/py3/botocore/data/migration-hub-refactor-spaces/2021-10-26/service-2.json index 0cdc50d56f..523831a28c 100644 --- a/contrib/python/botocore/py3/botocore/data/migration-hub-refactor-spaces/2021-10-26/service-2.json +++ b/contrib/python/botocore/py3/botocore/data/migration-hub-refactor-spaces/2021-10-26/service-2.json @@ -2783,5 +2783,5 @@ "pattern":"^[a-z0-9]{10}$" } }, - "documentation":"<p><fullname>Amazon Web Services Migration Hub Refactor Spaces</fullname></p> <pre><code> <p>This API reference provides descriptions, syntax, and other details about each of the actions and data types for Amazon Web Services Migration Hub Refactor Spaces (Refactor Spaces). The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see <a href="http://aws.amazon.com/tools/#SDKs">Amazon Web Services SDKs</a>.</p> <p>To share Refactor Spaces environments with other Amazon Web Services accounts or with Organizations and their OUs, use Resource Access Manager's <code>CreateResourceShare</code> API. See <a href="https://docs.aws.amazon.com/ram/latest/APIReference/API_CreateResourceShare.html">CreateResourceShare</a> in the <i>Amazon Web Services RAM API Reference</i>.</p> </code></pre>" + "documentation":"<p><fullname>Amazon Web Services Migration Hub Refactor Spaces</fullname></p> <pre><code> <p>This API reference provides descriptions, syntax, and other details about each of the actions and data types for Amazon Web Services Migration Hub Refactor Spaces (Refactor Spaces). The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see <a href="https://aws.amazon.com/tools/#SDKs">Amazon Web Services SDKs</a>.</p> <p>To share Refactor Spaces environments with other Amazon Web Services accounts or with Organizations and their OUs, use Resource Access Manager's <code>CreateResourceShare</code> API. 
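Taken together, the new EMR members in this model file (OSReleaseLabel on RunJobFlow, AvailableOSReleases on the DescribeReleaseLabel output, and ReconfigurationType on InstanceGroupModifyConfig) compose roughly as in the following sketch; the names, IDs, and roles are placeholders, and the release labels are just the examples cited in the model documentation:

    import boto3

    emr = boto3.client('emr', region_name='us-east-1')

    # Discover which Amazon Linux releases are available for an EMR release.
    releases = emr.describe_release_label(ReleaseLabel='emr-6.6.0')
    print(releases.get('AvailableOSReleases', []))

    # Pin all nodes to one Amazon Linux release; omitting OSReleaseLabel
    # selects the latest validated release, per the documentation above.
    emr.run_job_flow(
        Name='example-cluster',  # placeholder
        ReleaseLabel='emr-6.6.0',
        OSReleaseLabel='2.0.20220218.1',
        Instances={
            'MasterInstanceType': 'm5.xlarge',
            'SlaveInstanceType': 'm5.xlarge',
            'InstanceCount': 3,
        },
        ServiceRole='EMR_DefaultRole',      # placeholder
        JobFlowRole='EMR_EC2_DefaultRole',  # placeholder
    )

    # MERGE folds new configurations into the existing set; OVERWRITE replaces it.
    emr.modify_instance_groups(
        InstanceGroups=[{
            'InstanceGroupId': 'ig-EXAMPLE',  # placeholder
            'ReconfigurationType': 'MERGE',
            'Configurations': [{'Classification': 'core-site', 'Properties': {}}],
        }]
    )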
See <a href="https://docs.aws.amazon.com/ram/latest/APIReference/API_CreateResourceShare.html">CreateResourceShare</a> in the <i>Amazon Web Services RAM API Reference</i>.</p> </code></pre>" } diff --git a/contrib/python/botocore/py3/botocore/discovery.py b/contrib/python/botocore/py3/botocore/discovery.py index 33b412304e..9c68001dea 100644 --- a/contrib/python/botocore/py3/botocore/discovery.py +++ b/contrib/python/botocore/py3/botocore/discovery.py @@ -27,12 +27,14 @@ class EndpointDiscoveryException(BotoCoreError): class EndpointDiscoveryRequired(EndpointDiscoveryException): - """ Endpoint Discovery is disabled but is required for this operation. """ + """Endpoint Discovery is disabled but is required for this operation.""" + fmt = 'Endpoint Discovery is not enabled but this operation requires it.' class EndpointDiscoveryRefreshFailed(EndpointDiscoveryException): - """ Endpoint Discovery failed to the refresh the known endpoints. """ + """Endpoint Discovery failed to refresh the known endpoints.""" + fmt = 'Endpoint Discovery failed to refresh the required endpoints.' @@ -42,7 +44,7 @@ def block_endpoint_discovery_required_operations(model, **kwargs): raise EndpointDiscoveryRequired() -class EndpointDiscoveryModel(object): +class EndpointDiscoveryModel: def __init__(self, service_model): self._service_model = service_model @@ -61,7 +63,9 @@ class EndpointDiscoveryModel(object): def discovery_required_for(self, operation_name): try: - operation_model = self._service_model.operation_model(operation_name) + operation_model = self._service_model.operation_model( + operation_name + ) return operation_model.endpoint_discovery.get('required', False) except OperationNotFoundError: return False @@ -72,7 +76,7 @@ class EndpointDiscoveryModel(object): if not kwargs.get('Identifiers'): kwargs.pop('Operation', None) kwargs.pop('Identifiers', None) - return dict((k, v) for k, v in kwargs.items() if k in input_keys) + return {k: v for k, v in kwargs.items() if k in input_keys} def gather_identifiers(self, operation, params): return self._gather_ids(operation.input_shape, params) @@ -85,13 +89,17 @@ class EndpointDiscoveryModel(object): for member_name, member_shape in shape.members.items(): if member_shape.metadata.get('endpointdiscoveryid'): ids[member_name] = params[member_name] - elif member_shape.type_name == 'structure' and member_name in params: + elif ( + member_shape.type_name == 'structure' and member_name in params + ): self._gather_ids(member_shape, params[member_name], ids) return ids -class EndpointDiscoveryManager(object): - def __init__(self, client, cache=None, current_time=None, always_discover=True): +class EndpointDiscoveryManager: + def __init__( + self, client, cache=None, current_time=None, always_discover=True + ): if cache is None: cache = {} self._cache = cache @@ -214,7 +222,7 @@ class EndpointDiscoveryManager(object): return None -class EndpointDiscoveryHandler(object): +class EndpointDiscoveryHandler: def __init__(self, manager): self._manager = manager diff --git a/contrib/python/botocore/py3/botocore/docs/__init__.py b/contrib/python/botocore/py3/botocore/docs/__init__.py index b76f7990e8..0119aa2942 100644 --- a/contrib/python/botocore/py3/botocore/docs/__init__.py +++ b/contrib/python/botocore/py3/botocore/docs/__init__.py @@ -33,6 +33,7 @@ def generate_docs(root_dir, session): for service_name in session.get_available_services(): docs = ServiceDocumenter(service_name, session).document_service() service_doc_path = os.path.join( - services_doc_path, service_name + 
'.rst') + services_doc_path, f"{service_name}.rst" + ) with open(service_doc_path, 'wb') as f: f.write(docs) diff --git a/contrib/python/botocore/py3/botocore/docs/bcdoc/docstringparser.py b/contrib/python/botocore/py3/botocore/docs/bcdoc/docstringparser.py index 868bd5d891..9c34085809 100644 --- a/contrib/python/botocore/py3/botocore/docs/bcdoc/docstringparser.py +++ b/contrib/python/botocore/py3/botocore/docs/bcdoc/docstringparser.py @@ -51,12 +51,13 @@ class DocStringParser(six.moves.html_parser.HTMLParser): self.tree.add_data(data) -class HTMLTree(object): +class HTMLTree: """ A tree which handles HTML nodes. Designed to work with a python HTML parser, meaning that the current_node will be the most recently opened tag. When a tag is closed, the current_node moves up to the parent node. """ + def __init__(self, doc): self.doc = doc self.head = StemNode() @@ -93,7 +94,7 @@ class HTMLTree(object): self.head.write(self.doc) -class Node(object): +class Node: def __init__(self, parent=None): self.parent = parent @@ -103,7 +104,7 @@ class Node(object): class StemNode(Node): def __init__(self, parent=None): - super(StemNode, self).__init__(parent) + super().__init__(parent) self.children = [] def add_child(self, child): @@ -122,8 +123,9 @@ class TagNode(StemNode): """ A generic Tag node. It will verify that handlers exist before writing. """ + def __init__(self, tag, attrs=None, parent=None): - super(TagNode, self).__init__(parent) + super().__init__(parent) self.attrs = attrs self.tag = tag @@ -145,11 +147,11 @@ class TagNode(StemNode): class LineItemNode(TagNode): def __init__(self, attrs=None, parent=None): - super(LineItemNode, self).__init__('li', attrs, parent) + super().__init__('li', attrs, parent) def write(self, doc): self._lstrip(self) - super(LineItemNode, self).write(doc) + super().write(doc) def _lstrip(self, node): """ @@ -174,9 +176,10 @@ class DataNode(Node): """ A Node that contains only string data. """ + def __init__(self, data, parent=None): - super(DataNode, self).__init__(parent) - if not isinstance(data, six.string_types): + super().__init__(parent) + if not isinstance(data, str): raise ValueError("Expecting string type, %s given." % type(data)) self.data = data diff --git a/contrib/python/botocore/py3/botocore/docs/bcdoc/restdoc.py b/contrib/python/botocore/py3/botocore/docs/bcdoc/restdoc.py index 9f91e1c350..2064526722 100644 --- a/contrib/python/botocore/py3/botocore/docs/bcdoc/restdoc.py +++ b/contrib/python/botocore/py3/botocore/docs/bcdoc/restdoc.py @@ -19,8 +19,7 @@ from botocore.docs.bcdoc.style import ReSTStyle LOG = logging.getLogger('bcdocs') -class ReSTDocument(object): - +class ReSTDocument: def __init__(self, target='man'): self.style = ReSTStyle(self) self.target = target @@ -46,7 +45,7 @@ class ReSTDocument(object): """ Write content on a newline. """ - self._write('%s%s\n' % (self.style.spaces(), content)) + self._write(f'{self.style.spaces()}{content}\n') def peek_write(self): """ @@ -117,7 +116,7 @@ class DocumentStructure(ReSTDocument): :param context: A dictionary of data to store with the structure. These are only stored per section not the entire structure. """ - super(DocumentStructure, self).__init__(target=target) + super().__init__(target=target) self._name = name self._structure = OrderedDict() self._path = [self._name] @@ -172,8 +171,9 @@ class DocumentStructure(ReSTDocument): to the document structure it was instantiated from. 
""" # Add a new section - section = self.__class__(name=name, target=self.target, - context=context) + section = self.__class__( + name=name, target=self.target, context=context + ) section.path = self.path + [name] # Indent the section apporpriately as well section.style.indentation = self.style.indentation diff --git a/contrib/python/botocore/py3/botocore/docs/bcdoc/style.py b/contrib/python/botocore/py3/botocore/docs/bcdoc/style.py index 4470d65d3c..0f88f3389a 100644 --- a/contrib/python/botocore/py3/botocore/docs/bcdoc/style.py +++ b/contrib/python/botocore/py3/botocore/docs/bcdoc/style.py @@ -16,8 +16,7 @@ import logging logger = logging.getLogger('bcdocs') -class BaseStyle(object): - +class BaseStyle: def __init__(self, doc, indent_width=2): self.doc = doc self.indent_width = indent_width @@ -65,7 +64,6 @@ class BaseStyle(object): class ReSTStyle(BaseStyle): - def __init__(self, doc, indent_width=2): BaseStyle.__init__(self, doc, indent_width) self.do_p = True @@ -123,12 +121,12 @@ class ReSTStyle(BaseStyle): def ref(self, title, link=None): if link is None: link = title - self.doc.write(':doc:`%s <%s>`' % (title, link)) + self.doc.write(f':doc:`{title} <{link}>`') def _heading(self, s, border_char): border = border_char * len(s) self.new_paragraph() - self.doc.write('%s\n%s\n%s' % (border, s, border)) + self.doc.write(f'{border}\n{s}\n{border}') self.new_paragraph() def h1(self, s): @@ -219,13 +217,13 @@ class ReSTStyle(BaseStyle): self.doc.do_translation = True def link_target_definition(self, refname, link): - self.doc.writeln('.. _%s: %s' % (refname, link)) + self.doc.writeln(f'.. _{refname}: {link}') def sphinx_reference_label(self, label, text=None): if text is None: text = label if self.doc.target == 'html': - self.doc.write(':ref:`%s <%s>`' % (text, label)) + self.doc.write(f':ref:`{text} <{label}>`') else: self.doc.write(text) @@ -407,12 +405,12 @@ class ReSTStyle(BaseStyle): def external_link(self, title, link): if self.doc.target == 'html': - self.doc.write('`%s <%s>`_' % (title, link)) + self.doc.write(f'`{title} <{link}>`_') else: self.doc.write(title) def internal_link(self, title, page): if self.doc.target == 'html': - self.doc.write(':doc:`%s <%s>`' % (title, page)) + self.doc.write(f':doc:`{title} <{page}>`') else: self.doc.write(title) diff --git a/contrib/python/botocore/py3/botocore/docs/client.py b/contrib/python/botocore/py3/botocore/docs/client.py index 812f73d30c..3db909880e 100644 --- a/contrib/python/botocore/py3/botocore/docs/client.py +++ b/contrib/python/botocore/py3/botocore/docs/client.py @@ -28,7 +28,7 @@ def _allowlist_generate_presigned_url(method_name, service_name, **kwargs): return service_name in ['s3'] -class ClientDocumenter(object): +class ClientDocumenter: _CLIENT_METHODS_FILTERS = [ _allowlist_generate_presigned_url, ] @@ -84,11 +84,15 @@ class ClientDocumenter(object): section = section.add_new_section('intro') # Write out the top level description for the client. official_service_name = get_official_service_name( - self._client.meta.service_model) + self._client.meta.service_model + ) section.write( - 'A low-level client representing %s' % official_service_name) + f"A low-level client representing {official_service_name}" + ) section.style.new_line() - section.include_doc_string(self._client.meta.service_model.documentation) + section.include_doc_string( + self._client.meta.service_model.documentation + ) # Write out the client example instantiation. 
self._add_client_creation_example(section) @@ -99,19 +103,20 @@ class ClientDocumenter(object): section.style.new_line() class_name = self._client.__class__.__name__ for method_name in sorted(client_methods): - section.style.li(':py:meth:`~%s.Client.%s`' % ( - class_name, method_name)) + section.style.li(f":py:meth:`~{class_name}.Client.{method_name}`") def _add_class_signature(self, section): section.style.start_sphinx_py_class( - class_name='%s.Client' % self._client.__class__.__name__) + class_name=f'{self._client.__class__.__name__}.Client' + ) def _add_client_creation_example(self, section): section.style.start_codeblock() section.style.new_line() section.write( 'client = session.create_client(\'{service}\')'.format( - service=self._service_name) + service=self._service_name + ) ) section.style.end_codeblock() @@ -119,7 +124,8 @@ class ClientDocumenter(object): section = section.add_new_section('methods') for method_name in sorted(client_methods): self._add_client_method( - section, method_name, client_methods[method_name]) + section, method_name, client_methods[method_name] + ) def _add_client_method(self, section, method_name, method): section = section.add_new_section(method_name) @@ -141,7 +147,7 @@ class ClientDocumenter(object): error_section.style.new_line() client_name = self._client.__class__.__name__ for error in operation_model.error_shapes: - class_name = '%s.Client.exceptions.%s' % (client_name, error.name) + class_name = f'{client_name}.Client.exceptions.{error.name}' error_section.style.li(':py:class:`%s`' % class_name) def _add_model_driven_method(self, section, method_name): @@ -151,7 +157,9 @@ class ClientDocumenter(object): example_prefix = 'response = client.%s' % method_name document_model_driven_method( - section, method_name, operation_model, + section, + method_name, + operation_model, event_emitter=self._client.meta.events, method_description=operation_model.documentation, example_prefix=example_prefix, @@ -165,10 +173,11 @@ class ClientDocumenter(object): shared_examples = self._shared_examples.get(operation_name) if shared_examples: document_shared_examples( - section, operation_model, example_prefix, shared_examples) + section, operation_model, example_prefix, shared_examples + ) -class ClientExceptionsDocumenter(object): +class ClientExceptionsDocumenter: _USER_GUIDE_LINK = ( 'https://boto3.amazonaws.com/' 'v1/documentation/api/latest/guide/error-handling.html' @@ -176,26 +185,32 @@ class ClientExceptionsDocumenter(object): _GENERIC_ERROR_SHAPE = DocumentedShape( name='Error', type_name='structure', - documentation=( - 'Normalized access to common exception attributes.' - ), - members=OrderedDict([ - ('Code', DocumentedShape( - name='Code', - type_name='string', - documentation=( - 'An identifier specifying the exception type.' + documentation=('Normalized access to common exception attributes.'), + members=OrderedDict( + [ + ( + 'Code', + DocumentedShape( + name='Code', + type_name='string', + documentation=( + 'An identifier specifying the exception type.' + ), + ), ), - )), - ('Message', DocumentedShape( - name='Message', - type_name='string', - documentation=( - 'A descriptive message explaining why the exception ' - 'occured.' + ( + 'Message', + DocumentedShape( + name='Message', + type_name='string', + documentation=( + 'A descriptive message explaining why the exception ' + 'occured.' 
+ ), + ), ), - )), - ]), + ] + ), ) def __init__(self, client): @@ -228,7 +243,7 @@ class ClientExceptionsDocumenter(object): def _exception_class_name(self, shape): cls_name = self._client.__class__.__name__ - return '%s.Client.exceptions.%s' % (cls_name, shape.name) + return f'{cls_name}.Client.exceptions.{shape.name}' def _add_exceptions_list(self, section): error_shapes = self._client.meta.service_model.error_shapes @@ -309,7 +324,9 @@ class ClientExceptionsDocumenter(object): event_emitter=self._client.meta.events, ) documenter.document_example( - example_section, shape, include=[self._GENERIC_ERROR_SHAPE], + example_section, + shape, + include=[self._GENERIC_ERROR_SHAPE], ) def _add_response_params(self, section, shape): @@ -323,5 +340,7 @@ class ClientExceptionsDocumenter(object): event_emitter=self._client.meta.events, ) documenter.document_params( - params_section, shape, include=[self._GENERIC_ERROR_SHAPE], + params_section, + shape, + include=[self._GENERIC_ERROR_SHAPE], ) diff --git a/contrib/python/botocore/py3/botocore/docs/docstring.py b/contrib/python/botocore/py3/botocore/docs/docstring.py index e0725fc452..93b2e6b23c 100644 --- a/contrib/python/botocore/py3/botocore/docs/docstring.py +++ b/contrib/python/botocore/py3/botocore/docs/docstring.py @@ -24,20 +24,21 @@ class LazyLoadedDocstring(str): help(). Note that all docstring classes **must** subclass from this class. It cannot be used directly as a docstring. """ + def __init__(self, *args, **kwargs): """ The args and kwargs are the same as the underlying document generation function. These just get proxied to the underlying function. """ - super(LazyLoadedDocstring, self).__init__() + super().__init__() self._gen_args = args self._gen_kwargs = kwargs self._docstring = None def __new__(cls, *args, **kwargs): # Needed in order to sub class from str with args and kwargs - return super(LazyLoadedDocstring, cls).__new__(cls) + return super().__new__(cls) def _write_docstring(self, *args, **kwargs): raise NotImplementedError( @@ -76,8 +77,8 @@ class LazyLoadedDocstring(str): # Call the document method function with the args and kwargs # passed to the class. self._write_docstring( - docstring_structure, *self._gen_args, - **self._gen_kwargs) + docstring_structure, *self._gen_args, **self._gen_kwargs + ) return docstring_structure.flush_structure().decode('utf-8') diff --git a/contrib/python/botocore/py3/botocore/docs/example.py b/contrib/python/botocore/py3/botocore/docs/example.py index c0f6bbb68a..05f50a3ce0 100644 --- a/contrib/python/botocore/py3/botocore/docs/example.py +++ b/contrib/python/botocore/py3/botocore/docs/example.py @@ -15,8 +15,9 @@ from botocore.docs.utils import py_default class BaseExampleDocumenter(ShapeDocumenter): - def document_example(self, section, shape, prefix=None, include=None, - exclude=None): + def document_example( + self, section, shape, prefix=None, include=None, exclude=None + ): """Generates an example based on a shape :param section: The section to write the documentation to. @@ -39,14 +40,19 @@ class BaseExampleDocumenter(ShapeDocumenter): if prefix is not None: section.write(prefix) self.traverse_and_document_shape( - section=section, shape=shape, history=history, - include=include, exclude=exclude) + section=section, + shape=shape, + history=history, + include=include, + exclude=exclude, + ) def document_recursive_shape(self, section, shape, **kwargs): section.write('{\'... 
recursive ...\'}') - def document_shape_default(self, section, shape, history, include=None, - exclude=None, **kwargs): + def document_shape_default( + self, section, shape, history, include=None, exclude=None, **kwargs + ): py_type = self._get_special_py_default(shape) if py_type is None: py_type = py_default(shape.type_name) @@ -55,8 +61,9 @@ class BaseExampleDocumenter(ShapeDocumenter): py_type = 'StreamingBody()' section.write(py_type) - def document_shape_type_string(self, section, shape, history, - include=None, exclude=None, **kwargs): + def document_shape_type_string( + self, section, shape, history, include=None, exclude=None, **kwargs + ): if 'enum' in shape.metadata: for i, enum in enumerate(shape.metadata['enum']): section.write('\'%s\'' % enum) @@ -65,23 +72,26 @@ class BaseExampleDocumenter(ShapeDocumenter): else: self.document_shape_default(section, shape, history) - def document_shape_type_list(self, section, shape, history, include=None, - exclude=None, **kwargs): + def document_shape_type_list( + self, section, shape, history, include=None, exclude=None, **kwargs + ): param_shape = shape.member list_section = section.add_new_section('list-value') self._start_nested_param(list_section, '[') param_section = list_section.add_new_section( - 'member', context={'shape': param_shape.name}) + 'member', context={'shape': param_shape.name} + ) self.traverse_and_document_shape( - section=param_section, shape=param_shape, history=history) + section=param_section, shape=param_shape, history=history + ) ending_comma_section = list_section.add_new_section('ending-comma') ending_comma_section.write(',') - ending_bracket_section = list_section.add_new_section( - 'ending-bracket') + ending_bracket_section = list_section.add_new_section('ending-bracket') self._end_nested_param(ending_bracket_section, ']') - def document_shape_type_structure(self, section, shape, history, - include=None, exclude=None, **kwargs): + def document_shape_type_structure( + self, section, shape, history, include=None, exclude=None, **kwargs + ): if not shape.members: section.write('{}') return @@ -98,29 +108,38 @@ class BaseExampleDocumenter(ShapeDocumenter): param_section.write('\'%s\': ' % param) param_shape = input_members[param] param_value_section = param_section.add_new_section( - 'member-value', context={'shape': param_shape.name}) + 'member-value', context={'shape': param_shape.name} + ) self.traverse_and_document_shape( - section=param_value_section, shape=param_shape, - history=history, name=param) + section=param_value_section, + shape=param_shape, + history=history, + name=param, + ) if i < len(input_members) - 1: ending_comma_section = param_section.add_new_section( - 'ending-comma') + 'ending-comma' + ) ending_comma_section.write(',') ending_comma_section.style.new_line() self._end_structure(section, '{', '}') - def document_shape_type_map(self, section, shape, history, - include=None, exclude=None, **kwargs): + def document_shape_type_map( + self, section, shape, history, include=None, exclude=None, **kwargs + ): map_section = section.add_new_section('map-value') self._start_nested_param(map_section, '{') value_shape = shape.value key_section = map_section.add_new_section( - 'key', context={'shape': shape.key.name}) + 'key', context={'shape': shape.key.name} + ) key_section.write('\'string\': ') value_section = map_section.add_new_section( - 'value', context={'shape': value_shape.name}) + 'value', context={'shape': value_shape.name} + ) self.traverse_and_document_shape( - section=value_section, 
shape=value_shape, history=history) + section=value_section, shape=value_shape, history=history + ) end_bracket_section = map_section.add_new_section('ending-bracket') self._end_nested_param(end_bracket_section, '}') @@ -161,8 +180,9 @@ class BaseExampleDocumenter(ShapeDocumenter): class ResponseExampleDocumenter(BaseExampleDocumenter): EVENT_NAME = 'response-example' - def document_shape_type_event_stream(self, section, shape, history, - **kwargs): + def document_shape_type_event_stream( + self, section, shape, history, **kwargs + ): section.write('EventStream(') self.document_shape_type_structure(section, shape, history, **kwargs) end_section = section.add_new_section('event-stream-end') @@ -172,8 +192,9 @@ class ResponseExampleDocumenter(BaseExampleDocumenter): class RequestExampleDocumenter(BaseExampleDocumenter): EVENT_NAME = 'request-example' - def document_shape_type_structure(self, section, shape, history, - include=None, exclude=None, **kwargs): + def document_shape_type_structure( + self, section, shape, history, include=None, exclude=None, **kwargs + ): param_format = '\'%s\'' operator = ': ' start = '{' @@ -196,13 +217,18 @@ class RequestExampleDocumenter(BaseExampleDocumenter): param_section.write(operator) param_shape = input_members[param] param_value_section = param_section.add_new_section( - 'member-value', context={'shape': param_shape.name}) + 'member-value', context={'shape': param_shape.name} + ) self.traverse_and_document_shape( - section=param_value_section, shape=param_shape, - history=history, name=param) + section=param_value_section, + shape=param_shape, + history=history, + name=param, + ) if i < len(input_members) - 1: ending_comma_section = param_section.add_new_section( - 'ending-comma') + 'ending-comma' + ) ending_comma_section.write(',') ending_comma_section.style.new_line() self._end_structure(section, start, end) diff --git a/contrib/python/botocore/py3/botocore/docs/method.py b/contrib/python/botocore/py3/botocore/docs/method.py index 93dc23baf5..0f7c60f6cc 100644 --- a/contrib/python/botocore/py3/botocore/docs/method.py +++ b/contrib/python/botocore/py3/botocore/docs/method.py @@ -42,8 +42,9 @@ def get_instance_public_methods(instance): return instance_methods -def document_model_driven_signature(section, name, operation_model, - include=None, exclude=None): +def document_model_driven_signature( + section, name, operation_model, include=None, exclude=None +): """Documents the signature of a model-driven method :param section: The section to write the documentation to. @@ -81,8 +82,9 @@ def document_model_driven_signature(section, name, operation_model, section.style.start_sphinx_py_method(name, signature_params) -def document_custom_signature(section, name, method, - include=None, exclude=None): +def document_custom_signature( + section, name, method, include=None, exclude=None +): """Documents the signature of a custom method :param section: The section to write the documentation to. 
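For context on the generic Error shape that the client.py hunks earlier in this diff inject (the normalized Code and Message members documented by ClientExceptionsDocumenter), the runtime pattern it describes looks roughly like this; the EKS client and cluster name are illustrative assumptions:

    import boto3

    eks = boto3.client('eks', region_name='us-east-1')
    try:
        eks.describe_cluster(name='missing-cluster')  # placeholder name
    except eks.exceptions.ResourceNotFoundException as err:
        # The injected Error shape documents exactly these normalized keys.
        print(err.response['Error']['Code'])
        print(err.response['Error']['Message'])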
@@ -104,7 +106,7 @@ def document_custom_signature(section, name, method, args=argspec.args[1:], varargs=argspec.varargs, varkw=argspec.varkw, - defaults=argspec.defaults + defaults=argspec.defaults, ) signature_params = signature_params.lstrip('(') signature_params = signature_params.rstrip(')') @@ -120,8 +122,7 @@ def document_custom_method(section, method_name, method): :param method: The handle to the method being documented """ - document_custom_signature( - section, method_name, method) + document_custom_signature(section, method_name, method) method_intro_section = section.add_new_section('method-intro') method_intro_section.writeln('') doc_string = inspect.getdoc(method) @@ -129,12 +130,20 @@ def document_custom_method(section, method_name, method): method_intro_section.style.write_py_doc_string(doc_string) -def document_model_driven_method(section, method_name, operation_model, - event_emitter, method_description=None, - example_prefix=None, include_input=None, - include_output=None, exclude_input=None, - exclude_output=None, document_output=True, - include_signature=True): +def document_model_driven_method( + section, + method_name, + operation_model, + event_emitter, + method_description=None, + example_prefix=None, + include_input=None, + include_output=None, + exclude_input=None, + exclude_output=None, + document_output=True, + include_signature=True, +): """Documents an individual method :param section: The section to write to @@ -174,8 +183,12 @@ def document_model_driven_method(section, method_name, operation_model, # Add the signature if specified. if include_signature: document_model_driven_signature( - section, method_name, operation_model, include=include_input, - exclude=exclude_input) + section, + method_name, + operation_model, + include=include_input, + exclude=exclude_input, + ) # Add the description for the method. method_intro_section = section.add_new_section('method-intro') @@ -185,16 +198,17 @@ def document_model_driven_method(section, method_name, operation_model, method_intro_section.writeln( 'This operation is deprecated and may not function as ' 'expected. This operation should not be used going forward ' - 'and is only kept for the purpose of backwards compatiblity.') + 'and is only kept for the purpose of backwards compatibility.' + ) method_intro_section.style.end_danger() service_uid = operation_model.service_model.metadata.get('uid') if service_uid is not None: method_intro_section.style.new_paragraph() method_intro_section.write("See also: ") - link = '%s/%s/%s' % (AWS_DOC_BASE, service_uid, - operation_model.name) - method_intro_section.style.external_link(title="AWS API Documentation", - link=link) + link = f"{AWS_DOC_BASE}/{service_uid}/{operation_model.name}" + method_intro_section.style.external_link( + title="AWS API Documentation", link=link + ) method_intro_section.writeln('') # Add the example section. 
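Nearly every Python hunk in this commit applies the same mechanical modernization: explicit object bases and two-argument super() calls are dropped, %-formatting becomes f-strings, dict() over a generator becomes a dict comprehension, and call arguments are exploded one per line in Black style. A condensed before/after sketch of the pattern (the class and attribute names are invented for illustration, not taken from botocore):

    # Before: Python 2 compatible idioms
    class Documenter(object):
        def __init__(self, base, name):
            super(Documenter, self).__init__()
            self.links = dict((n, '%s/%s' % (base, n)) for n in [name])

    # After: the Python 3 idioms applied throughout this diff
    class Documenter:
        def __init__(self, base, name):
            super().__init__()
            self.links = {n: f'{base}/{n}' for n in [name]}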
@@ -214,10 +228,15 @@ def document_model_driven_method(section, method_name, operation_model, RequestExampleDocumenter( service_name=operation_model.service_model.service_name, operation_name=operation_model.name, - event_emitter=event_emitter, context=context).document_example( - example_section, operation_model.input_shape, - prefix=example_prefix, include=include_input, - exclude=exclude_input) + event_emitter=event_emitter, + context=context, + ).document_example( + example_section, + operation_model.input_shape, + prefix=example_prefix, + include=include_input, + exclude=exclude_input, + ) else: example_section.style.new_paragraph() example_section.style.start_codeblock() @@ -229,9 +248,14 @@ def document_model_driven_method(section, method_name, operation_model, RequestParamsDocumenter( service_name=operation_model.service_model.service_name, operation_name=operation_model.name, - event_emitter=event_emitter, context=context).document_params( - request_params_section, operation_model.input_shape, - include=include_input, exclude=exclude_input) + event_emitter=event_emitter, + context=context, + ).document_params( + request_params_section, + operation_model.input_shape, + include=include_input, + exclude=exclude_input, + ) # Add the return value documentation return_section = section.add_new_section('return') @@ -266,13 +290,18 @@ def document_model_driven_method(section, method_name, operation_model, service_name=operation_model.service_model.service_name, operation_name=operation_model.name, event_emitter=event_emitter, - context=context).document_example( - return_example_section, operation_model.output_shape, - include=include_output, exclude=exclude_output) + context=context, + ).document_example( + return_example_section, + operation_model.output_shape, + include=include_output, + exclude=exclude_output, + ) # Add a description for the return value return_description_section = return_section.add_new_section( - 'description') + 'description' + ) return_description_section.style.new_line() return_description_section.style.bold('Response Structure') return_description_section.style.new_paragraph() @@ -280,8 +309,12 @@ def document_model_driven_method(section, method_name, operation_model, service_name=operation_model.service_model.service_name, operation_name=operation_model.name, event_emitter=event_emitter, - context=context).document_params( - return_description_section, operation_model.output_shape, - include=include_output, exclude=exclude_output) + context=context, + ).document_params( + return_description_section, + operation_model.output_shape, + include=include_output, + exclude=exclude_output, + ) else: return_section.write(':returns: None') diff --git a/contrib/python/botocore/py3/botocore/docs/paginator.py b/contrib/python/botocore/py3/botocore/docs/paginator.py index 0a0b7b7189..aa81395c11 100644 --- a/contrib/python/botocore/py3/botocore/docs/paginator.py +++ b/contrib/python/botocore/py3/botocore/docs/paginator.py @@ -17,7 +17,7 @@ from botocore.docs.utils import DocumentedShape from botocore.utils import get_service_module_name -class PaginatorDocumenter(object): +class PaginatorDocumenter: def __init__(self, client, service_paginator_model): self._client = client self._service_name = self._client.meta.service_model.service_name @@ -33,13 +33,14 @@ class PaginatorDocumenter(object): section.writeln('The available paginators are:') paginator_names = sorted( - self._service_paginator_model._paginator_config) + self._service_paginator_model._paginator_config + ) # List 
the available paginators and then document each paginator. for paginator_name in paginator_names: section.style.li( - ':py:class:`%s.Paginator.%s`' % ( - self._client.__class__.__name__, paginator_name)) + f':py:class:`{self._client.__class__.__name__}.Paginator.{paginator_name}`' + ) self._add_paginator(section, paginator_name) def _add_paginator(self, section, paginator_name): # Document the paginator class section.style.start_sphinx_py_class( - class_name='%s.Paginator.%s' % ( - self._client.__class__.__name__, paginator_name)) + class_name=( + f'{self._client.__class__.__name__}.Paginator.{paginator_name}' + ) + ) section.style.start_codeblock() section.style.new_line() # Document how to instantiate the paginator. section.write( - 'paginator = client.get_paginator(\'%s\')' % xform_name( - paginator_name) + f"paginator = client.get_paginator('{xform_name(paginator_name)}')" ) section.style.end_codeblock() section.style.new_line() # Get the pagination model for the particular paginator. paginator_config = self._service_paginator_model.get_paginator( - paginator_name) + paginator_name + ) document_paginate_method( section=section, paginator_name=paginator_name, event_emitter=self._client.meta.events, service_model=self._client.meta.service_model, - paginator_config=paginator_config + paginator_config=paginator_config, ) -def document_paginate_method(section, paginator_name, event_emitter, - service_model, paginator_config, - include_signature=True): +def document_paginate_method( + section, + paginator_name, + event_emitter, + service_model, + paginator_config, + include_signature=True, +): """Documents the paginate method of a paginator :param section: The section to write to @@ -91,8 +99,7 @@ def document_paginate_method(section, paginator_name, event_emitter, It is useful for generating docstrings. """ # Retrieve the operation model of the underlying operation. - operation_model = service_model.operation_model( - paginator_name) + operation_model = service_model.operation_model(paginator_name) # Add representations of the request and response parameters # we want to include in the description of the paginate method. @@ -100,40 +107,52 @@ def document_paginate_method(section, paginator_name, event_emitter, pagination_config_members = OrderedDict() pagination_config_members['MaxItems'] = DocumentedShape( - name='MaxItems', type_name='integer', + name='MaxItems', + type_name='integer', documentation=( '<p>The total number of items to return. If the total ' 'number of items available is more than the value ' 'specified in max-items then a <code>NextToken</code> ' 'will be provided in the output that you can use to ' - 'resume pagination.</p>')) + 'resume pagination.</p>' + ), + ) if paginator_config.get('limit_key', None): pagination_config_members['PageSize'] = DocumentedShape( - name='PageSize', type_name='integer', - documentation='<p>The size of each page.<p>') + name='PageSize', + type_name='integer', + documentation='<p>The size of each page.</p>', + ) pagination_config_members['StartingToken'] = DocumentedShape( - name='StartingToken', type_name='string', + name='StartingToken', + type_name='string', documentation=( '<p>A token to specify where to start paginating. 
' 'This is the <code>NextToken</code> from a previous ' - 'response.</p>')) + 'response.</p>' + ), + ) botocore_pagination_params = [ DocumentedShape( - name='PaginationConfig', type_name='structure', + name='PaginationConfig', + type_name='structure', documentation=( '<p>A dictionary that provides parameters to control ' - 'pagination.</p>'), - members=pagination_config_members) + 'pagination.</p>' + ), + members=pagination_config_members, + ) ] botocore_pagination_response_params = [ DocumentedShape( - name='NextToken', type_name='string', - documentation=( - '<p>A token to resume pagination.</p>')) + name='NextToken', + type_name='string', + documentation=('<p>A token to resume pagination.</p>'), + ) ] service_pagination_params = [] @@ -152,20 +171,23 @@ def document_paginate_method(section, paginator_name, event_emitter, # Hide the output tokens in the documentation. service_pagination_response_params = [] if isinstance(paginator_config['output_token'], list): - service_pagination_response_params += paginator_config[ - 'output_token'] + service_pagination_response_params += paginator_config['output_token'] else: - service_pagination_response_params.append(paginator_config[ - 'output_token']) + service_pagination_response_params.append( + paginator_config['output_token'] + ) paginate_description = ( 'Creates an iterator that will paginate through responses ' - 'from :py:meth:`{0}.Client.{1}`.'.format( - get_service_module_name(service_model), xform_name(paginator_name)) + 'from :py:meth:`{}.Client.{}`.'.format( + get_service_module_name(service_model), xform_name(paginator_name) + ) ) document_model_driven_method( - section, 'paginate', operation_model, + section, + 'paginate', + operation_model, event_emitter=event_emitter, method_description=paginate_description, example_prefix='response_iterator = paginator.paginate', @@ -173,5 +195,5 @@ def document_paginate_method(section, paginator_name, event_emitter, include_output=botocore_pagination_response_params, exclude_input=service_pagination_params, exclude_output=service_pagination_response_params, - include_signature=include_signature + include_signature=include_signature, ) diff --git a/contrib/python/botocore/py3/botocore/docs/params.py b/contrib/python/botocore/py3/botocore/docs/params.py index 8fab6a5da3..335d6d5029 100644 --- a/contrib/python/botocore/py3/botocore/docs/params.py +++ b/contrib/python/botocore/py3/botocore/docs/params.py @@ -32,53 +32,77 @@ class BaseParamsDocumenter(ShapeDocumenter): """ history = [] self.traverse_and_document_shape( - section=section, shape=shape, history=history, - name=None, include=include, exclude=exclude) + section=section, + shape=shape, + history=history, + name=None, + include=include, + exclude=exclude, + ) def document_recursive_shape(self, section, shape, **kwargs): self._add_member_documentation(section, shape, **kwargs) - def document_shape_default(self, section, shape, history, include=None, - exclude=None, **kwargs): + def document_shape_default( + self, section, shape, history, include=None, exclude=None, **kwargs + ): self._add_member_documentation(section, shape, **kwargs) - def document_shape_type_list(self, section, shape, history, include=None, - exclude=None, **kwargs): + def document_shape_type_list( + self, section, shape, history, include=None, exclude=None, **kwargs + ): self._add_member_documentation(section, shape, **kwargs) param_shape = shape.member param_section = section.add_new_section( - param_shape.name, context={'shape': shape.member.name}) + param_shape.name, 
context={'shape': shape.member.name} + ) self._start_nested_param(param_section) self.traverse_and_document_shape( - section=param_section, shape=param_shape, - history=history, name=None) + section=param_section, + shape=param_shape, + history=history, + name=None, + ) section = section.add_new_section('end-list') self._end_nested_param(section) - def document_shape_type_map(self, section, shape, history, include=None, - exclude=None, **kwargs): + def document_shape_type_map( + self, section, shape, history, include=None, exclude=None, **kwargs + ): self._add_member_documentation(section, shape, **kwargs) key_section = section.add_new_section( - 'key', context={'shape': shape.key.name}) + 'key', context={'shape': shape.key.name} + ) self._start_nested_param(key_section) self._add_member_documentation(key_section, shape.key) param_section = section.add_new_section( - shape.value.name, context={'shape': shape.value.name}) + shape.value.name, context={'shape': shape.value.name} + ) param_section.style.indent() self._start_nested_param(param_section) self.traverse_and_document_shape( - section=param_section, shape=shape.value, - history=history, name=None) + section=param_section, + shape=shape.value, + history=history, + name=None, + ) end_section = section.add_new_section('end-map') self._end_nested_param(end_section) self._end_nested_param(end_section) - def document_shape_type_structure(self, section, shape, history, - include=None, exclude=None, - name=None, **kwargs): + def document_shape_type_structure( + self, + section, + shape, + history, + include=None, + exclude=None, + name=None, + **kwargs, + ): members = self._add_members_to_shape(shape.members, include) self._add_member_documentation(section, shape, name=name) for param in members: @@ -86,11 +110,15 @@ class BaseParamsDocumenter(ShapeDocumenter): continue param_shape = members[param] param_section = section.add_new_section( - param, context={'shape': param_shape.name}) + param, context={'shape': param_shape.name} + ) self._start_nested_param(param_section) self.traverse_and_document_shape( - section=param_section, shape=param_shape, - history=history, name=param) + section=param_section, + shape=param_shape, + history=history, + name=param, + ) section = section.add_new_section('end-structure') self._end_nested_param(section) @@ -158,16 +186,19 @@ class ResponseParamsDocumenter(BaseParamsDocumenter): tagged_union_members_str = ', '.join( ['``%s``' % key for key in shape.members.keys()] ) - unknown_code_example = ('\'SDK_UNKNOWN_MEMBER\': ' - '{\'name\': \'UnknownMemberName\'}') + unknown_code_example = ( + '\'SDK_UNKNOWN_MEMBER\': ' + '{\'name\': \'UnknownMemberName\'}' + ) tagged_union_docs.write(note % (tagged_union_members_str)) example = section.add_new_section('param-unknown-example') example.style.codeblock(unknown_code_example) documentation_section.include_doc_string(shape.documentation) section.style.new_paragraph() - def document_shape_type_event_stream(self, section, shape, history, - **kwargs): + def document_shape_type_event_stream( + self, section, shape, history, **kwargs + ): self.document_shape_type_structure(section, shape, history, **kwargs) @@ -176,8 +207,9 @@ class RequestParamsDocumenter(BaseParamsDocumenter): EVENT_NAME = 'request-params' - def document_shape_type_structure(self, section, shape, history, - include=None, exclude=None, **kwargs): + def document_shape_type_structure( + self, section, shape, history, include=None, exclude=None, **kwargs + ): if len(history) > 1: 
self._add_member_documentation(section, shape, **kwargs) section.style.indent() @@ -187,26 +219,37 @@ class RequestParamsDocumenter(BaseParamsDocumenter): continue param_shape = members[param] param_section = section.add_new_section( - param, context={'shape': param_shape.name}) + param, context={'shape': param_shape.name} + ) param_section.style.new_line() is_required = param in shape.required_members self.traverse_and_document_shape( - section=param_section, shape=param_shape, - history=history, name=param, is_required=is_required) + section=param_section, + shape=param_shape, + history=history, + name=param, + is_required=is_required, + ) section = section.add_new_section('end-structure') if len(history) > 1: section.style.dedent() section.style.new_line() - def _add_member_documentation(self, section, shape, name=None, - is_top_level_param=False, is_required=False, - **kwargs): + def _add_member_documentation( + self, + section, + shape, + name=None, + is_top_level_param=False, + is_required=False, + **kwargs, + ): py_type = self._get_special_py_type_name(shape) if py_type is None: py_type = py_type_name(shape.type_name) if is_top_level_param: type_section = section.add_new_section('param-type') - type_section.write(':type %s: %s' % (name, py_type)) + type_section.write(f':type {name}: {py_type}') end_type_section = type_section.add_new_section('end-param-type') end_type_section.style.new_line() name_section = section.add_new_section('param-name') @@ -226,7 +269,8 @@ class RequestParamsDocumenter(BaseParamsDocumenter): is_required_section.style.bold('[REQUIRED] ') if shape.documentation: documentation_section = section.add_new_section( - 'param-documentation') + 'param-documentation' + ) documentation_section.style.indent() if getattr(shape, 'is_tagged_union', False): tagged_union_docs = section.add_new_section( diff --git a/contrib/python/botocore/py3/botocore/docs/service.py b/contrib/python/botocore/py3/botocore/docs/service.py index 2f12520756..b68475e5f3 100644 --- a/contrib/python/botocore/py3/botocore/docs/service.py +++ b/contrib/python/botocore/py3/botocore/docs/service.py @@ -17,14 +17,17 @@ from botocore.docs.waiter import WaiterDocumenter from botocore.exceptions import DataNotFoundError -class ServiceDocumenter(object): +class ServiceDocumenter: def __init__(self, service_name, session): self._session = session self._service_name = service_name self._client = self._session.create_client( - service_name, region_name='us-east-1', aws_access_key_id='foo', - aws_secret_access_key='bar') + service_name, + region_name='us-east-1', + aws_access_key_id='foo', + aws_secret_access_key='bar', + ) self._event_emitter = self._client.meta.events self.sections = [ @@ -33,7 +36,7 @@ class ServiceDocumenter(object): 'client-api', 'client-exceptions', 'paginator-api', - 'waiter-api' + 'waiter-api', ] def document_service(self): @@ -42,8 +45,8 @@ class ServiceDocumenter(object): :returns: The reStructured text of the documented service. 
""" doc_structure = DocumentStructure( - self._service_name, section_names=self.sections, - target='html') + self._service_name, section_names=self.sections, target='html' + ) self.title(doc_structure.get_section('title')) self.table_of_contents(doc_structure.get_section('table-of-contents')) self.client_api(doc_structure.get_section('client-api')) @@ -55,9 +58,7 @@ class ServiceDocumenter(object): def title(self, section): section.style.h1(self._client.__class__.__name__) self._event_emitter.emit( - 'docs.%s.%s' % ('title', - self._service_name), - section=section + f"docs.title.{self._service_name}", section=section ) def table_of_contents(self, section): @@ -78,23 +79,28 @@ class ServiceDocumenter(object): def paginator_api(self, section): try: service_paginator_model = self._session.get_paginator_model( - self._service_name) + self._service_name + ) except DataNotFoundError: return paginator_documenter = PaginatorDocumenter( - self._client, service_paginator_model) + self._client, service_paginator_model + ) paginator_documenter.document_paginators(section) def waiter_api(self, section): if self._client.waiter_names: service_waiter_model = self._session.get_waiter_model( - self._service_name) + self._service_name + ) waiter_documenter = WaiterDocumenter( - self._client, service_waiter_model) + self._client, service_waiter_model + ) waiter_documenter.document_waiters(section) def get_examples(self, service_name, api_version=None): loader = self._session.get_component('data_loader') examples = loader.load_service_model( - service_name, 'examples-1', api_version) + service_name, 'examples-1', api_version + ) return examples['examples'] diff --git a/contrib/python/botocore/py3/botocore/docs/shape.py b/contrib/python/botocore/py3/botocore/docs/shape.py index 462da66000..640a5d18ef 100644 --- a/contrib/python/botocore/py3/botocore/docs/shape.py +++ b/contrib/python/botocore/py3/botocore/docs/shape.py @@ -19,23 +19,29 @@ from botocore.utils import is_json_value_header -class ShapeDocumenter(object): +class ShapeDocumenter: EVENT_NAME = '' - def __init__(self, service_name, operation_name, event_emitter, - context=None): + def __init__( + self, service_name, operation_name, event_emitter, context=None + ): self._service_name = service_name self._operation_name = operation_name self._event_emitter = event_emitter self._context = context if context is None: - self._context = { - 'special_shape_types': {} - } - - def traverse_and_document_shape(self, section, shape, history, - include=None, exclude=None, name=None, - is_required=False): + self._context = {'special_shape_types': {}} + + def traverse_and_document_shape( + self, + section, + shape, + history, + include=None, + exclude=None, + name=None, + is_required=False, + ): """Traverses and documents a shape Will take a self class and call its appropriate methods as a shape @@ -65,29 +71,34 @@ class ShapeDocumenter(object): self.document_recursive_shape(section, shape, name=name) else: history.append(shape.name) - is_top_level_param = (len(history) == 2) + is_top_level_param = len(history) == 2 if hasattr(shape, 'is_document_type') and shape.is_document_type: param_type = 'document' - getattr(self, 'document_shape_type_%s' % param_type, - self.document_shape_default)( - section, shape, history=history, name=name, - include=include, exclude=exclude, - is_top_level_param=is_top_level_param, - is_required=is_required) + getattr( + self, + f"document_shape_type_{param_type}", + self.document_shape_default, + )( + section, + shape, + 
history=history, + name=name, + include=include, + exclude=exclude, + is_top_level_param=is_top_level_param, + is_required=is_required, + ) if is_top_level_param: self._event_emitter.emit( - 'docs.%s.%s.%s.%s' % (self.EVENT_NAME, - self._service_name, - self._operation_name, - name), - section=section) - at_overlying_method_section = (len(history) == 1) + f"docs.{self.EVENT_NAME}.{self._service_name}.{self._operation_name}.{name}", + section=section, + ) + at_overlying_method_section = len(history) == 1 if at_overlying_method_section: self._event_emitter.emit( - 'docs.%s.%s.%s.complete-section' % (self.EVENT_NAME, - self._service_name, - self._operation_name), - section=section) + f"docs.{self.EVENT_NAME}.{self._service_name}.{self._operation_name}.complete-section", + section=section, + ) history.pop() def _get_special_py_default(self, shape): @@ -116,7 +127,8 @@ class ShapeDocumenter(object): if hasattr(shape, 'is_document_type') and shape.is_document_type: return special_type_map['document_type'] for special_type, marked_shape in self._context[ - 'special_shape_types'].items(): + 'special_shape_types' + ].items(): if special_type in special_type_map: if shape == marked_shape: return special_type_map[special_type] diff --git a/contrib/python/botocore/py3/botocore/docs/sharedexample.py b/contrib/python/botocore/py3/botocore/docs/sharedexample.py index 39cdd41fa8..58cdfa594c 100644 --- a/contrib/python/botocore/py3/botocore/docs/sharedexample.py +++ b/contrib/python/botocore/py3/botocore/docs/sharedexample.py @@ -13,14 +13,14 @@ import numbers import re -from botocore.compat import six from botocore.docs.utils import escape_controls from botocore.utils import parse_timestamp -class SharedExampleDocumenter(object): - def document_shared_example(self, example, prefix, section, - operation_model): +class SharedExampleDocumenter: + def document_shared_example( + self, example, prefix, section, operation_model + ): """Documents a single shared example based on its definition. :param example: The model of the example @@ -34,8 +34,9 @@ class SharedExampleDocumenter(object): section.style.new_paragraph() section.write(example.get('description')) section.style.new_line() - self.document_input(section, example, prefix, - operation_model.input_shape) + self.document_input( + section, example, prefix, operation_model.input_shape + ) self.document_output(section, example, operation_model.output_shape) def document_input(self, section, example, prefix, shape): @@ -97,8 +98,9 @@ class SharedExampleDocumenter(object): else: self._document_str(section, value, path) - def _document_dict(self, section, value, comments, path, shape, - top_level=False): + def _document_dict( + self, section, value, comments, path, shape, top_level=False + ): dict_section = section.add_new_section('dict-value') self._start_nested_value(dict_section, '{') for key, val in value.items(): @@ -168,7 +170,7 @@ class SharedExampleDocumenter(object): # We do the string conversion because this might accept a type that # we don't specifically address. 
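            # Illustrative sketch (not part of this change): escape_controls,
            # from botocore.docs.utils, renders control characters literally
            # (a newline in the value becomes the two characters '\n'), and
            # the f-string below stringifies any non-string value.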
safe_value = escape_controls(value) - section.write(u"'%s'," % six.text_type(safe_value)) + section.write(f"'{safe_value}',") def _document_number(self, section, value, path): section.write("%s," % str(value)) @@ -199,8 +201,9 @@ class SharedExampleDocumenter(object): section.write(end) -def document_shared_examples(section, operation_model, example_prefix, - shared_examples): +def document_shared_examples( + section, operation_model, example_prefix, shared_examples +): """Documents the shared examples :param section: The section to write to. @@ -220,5 +223,5 @@ def document_shared_examples(section, operation_model, example_prefix, example=example, section=container_section.add_new_section(example['id']), prefix=example_prefix, - operation_model=operation_model + operation_model=operation_model, ) diff --git a/contrib/python/botocore/py3/botocore/docs/utils.py b/contrib/python/botocore/py3/botocore/docs/utils.py index a0d2d6623f..eb6cae145c 100644 --- a/contrib/python/botocore/py3/botocore/docs/utils.py +++ b/contrib/python/botocore/py3/botocore/docs/utils.py @@ -73,31 +73,53 @@ def get_official_service_name(service_model): if short_name.startswith('AWS'): short_name = short_name[4:] if short_name and short_name.lower() not in official_name.lower(): - official_name += ' ({0})'.format(short_name) + official_name += f' ({short_name})' return official_name _DocumentedShape = namedtuple( - 'DocumentedShape', ['name', 'type_name', 'documentation', 'metadata', - 'members', 'required_members']) - - -class DocumentedShape (_DocumentedShape): + 'DocumentedShape', + [ + 'name', + 'type_name', + 'documentation', + 'metadata', + 'members', + 'required_members', + ], +) + + +class DocumentedShape(_DocumentedShape): """Use this class to inject new shapes into a model for documentation""" - def __new__(cls, name, type_name, documentation, metadata=None, - members=None, required_members=None): + + def __new__( + cls, + name, + type_name, + documentation, + metadata=None, + members=None, + required_members=None, + ): if metadata is None: metadata = [] if members is None: members = [] if required_members is None: required_members = [] - return super(DocumentedShape, cls).__new__( - cls, name, type_name, documentation, metadata, members, - required_members) - - -class AutoPopulatedParam(object): + return super().__new__( + cls, + name, + type_name, + documentation, + metadata, + members, + required_members, + ) + + +class AutoPopulatedParam: def __init__(self, name, param_description=None): self.name = name self.param_description = param_description @@ -105,7 +127,8 @@ class AutoPopulatedParam(object): self.param_description = ( 'Please note that this parameter is automatically populated ' 'if it is not provided. Including this parameter is not ' - 'required\n') + 'required\n' + ) def document_auto_populated_param(self, event_name, section, **kwargs): """Documents auto populated parameters @@ -120,7 +143,8 @@ class AutoPopulatedParam(object): if 'is-required' in section.available_sections: section.delete_section('is-required') description_section = section.get_section( - 'param-documentation') + 'param-documentation' + ) description_section.writeln(self.param_description) elif event_name.startswith('docs.request-example'): section = section.get_section('structure-value') @@ -128,13 +152,14 @@ class AutoPopulatedParam(object): section.delete_section(self.name) -class HideParamFromOperations(object): +class HideParamFromOperations: """Hides a single parameter from multiple operations. 
This method will remove a parameter from documentation and from
    examples. This method is typically used for things that are
    automatically populated because a user would be unable to provide
    a value (e.g., a checksum of a serialized XML request body)."""
+
    def __init__(self, service_name, parameter_name, operation_names):
        """
        :type service_name: str
@@ -166,8 +191,9 @@ class HideParamFromOperations(object):
             section.delete_section(self._parameter_name)
 
-class AppendParamDocumentation(object):
+class AppendParamDocumentation:
     """Appends documentation to a specific parameter"""
+
     def __init__(self, parameter_name, doc_string):
         self._parameter_name = parameter_name
         self._doc_string = doc_string
@@ -175,8 +201,7 @@ class AppendParamDocumentation(object):
     def append_documentation(self, event_name, section, **kwargs):
         if self._parameter_name in section.available_sections:
             section = section.get_section(self._parameter_name)
-            description_section = section.get_section(
-                'param-documentation')
+            description_section = section.get_section('param-documentation')
             description_section.writeln(self._doc_string)
diff --git a/contrib/python/botocore/py3/botocore/docs/waiter.py b/contrib/python/botocore/py3/botocore/docs/waiter.py
index 4aaca10bee..8c4798a4bc 100644
--- a/contrib/python/botocore/py3/botocore/docs/waiter.py
+++ b/contrib/python/botocore/py3/botocore/docs/waiter.py
@@ -17,7 +17,7 @@ from botocore.docs.utils import DocumentedShape
 from botocore.utils import get_service_module_name
 
-class WaiterDocumenter(object):
+class WaiterDocumenter:
     def __init__(self, client, service_waiter_model):
         self._client = client
         self._service_name = self._client.meta.service_model.service_name
@@ -33,15 +33,15 @@ class WaiterDocumenter(object):
         section.writeln('The available waiters are:')
         for waiter_name in self._service_waiter_model.waiter_names:
             section.style.li(
-                ':py:class:`%s.Waiter.%s`' % (
-                    self._client.__class__.__name__, waiter_name))
+                f":py:class:`{self._client.__class__.__name__}.Waiter.{waiter_name}`"
+            )
             self._add_single_waiter(section, waiter_name)
 
     def _add_single_waiter(self, section, waiter_name):
         section = section.add_new_section(waiter_name)
         section.style.start_sphinx_py_class(
-            class_name='%s.Waiter.%s' % (
-                self._client.__class__.__name__, waiter_name))
+            class_name=f"{self._client.__class__.__name__}.Waiter.{waiter_name}"
+        )
 
         # Add example on how to instantiate waiter.
         section.style.start_codeblock()
@@ -58,13 +58,18 @@ class WaiterDocumenter(object):
             waiter_name=waiter_name,
             event_emitter=self._client.meta.events,
             service_model=self._client.meta.service_model,
-            service_waiter_model=self._service_waiter_model
+            service_waiter_model=self._service_waiter_model,
         )
 
 
-def document_wait_method(section, waiter_name, event_emitter,
-                         service_model, service_waiter_model,
-                         include_signature=True):
+def document_wait_method(
+    section,
+    waiter_name,
+    event_emitter,
+    service_model,
+    service_waiter_model,
+    include_signature=True,
+):
     """Documents the wait method of a waiter
 
     :param section: The section to write to
@@ -81,47 +86,59 @@ def document_wait_method(section, waiter_name, event_emitter,
         It is useful for generating docstrings.
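    A minimal sketch of the call being documented here (service, waiter
    and parameter names are illustrative only):

        waiter = client.get_waiter('bucket_exists')
        waiter.wait(
            Bucket='mybucket',
            WaiterConfig={'Delay': 5, 'MaxAttempts': 20},
        )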
""" waiter_model = service_waiter_model.get_waiter(waiter_name) - operation_model = service_model.operation_model( - waiter_model.operation) + operation_model = service_model.operation_model(waiter_model.operation) waiter_config_members = OrderedDict() waiter_config_members['Delay'] = DocumentedShape( - name='Delay', type_name='integer', + name='Delay', + type_name='integer', documentation=( '<p>The amount of time in seconds to wait between ' - 'attempts. Default: {0}</p>'.format(waiter_model.delay))) + 'attempts. Default: {}</p>'.format(waiter_model.delay) + ), + ) waiter_config_members['MaxAttempts'] = DocumentedShape( - name='MaxAttempts', type_name='integer', + name='MaxAttempts', + type_name='integer', documentation=( '<p>The maximum number of attempts to be made. ' - 'Default: {0}</p>'.format(waiter_model.max_attempts))) + 'Default: {}</p>'.format(waiter_model.max_attempts) + ), + ) botocore_waiter_params = [ DocumentedShape( - name='WaiterConfig', type_name='structure', + name='WaiterConfig', + type_name='structure', documentation=( '<p>A dictionary that provides parameters to control ' - 'waiting behavior.</p>'), - members=waiter_config_members) + 'waiting behavior.</p>' + ), + members=waiter_config_members, + ) ] wait_description = ( - 'Polls :py:meth:`{0}.Client.{1}` every {2} ' + 'Polls :py:meth:`{}.Client.{}` every {} ' 'seconds until a successful state is reached. An error is ' - 'returned after {3} failed checks.'.format( + 'returned after {} failed checks.'.format( get_service_module_name(service_model), xform_name(waiter_model.operation), - waiter_model.delay, waiter_model.max_attempts) + waiter_model.delay, + waiter_model.max_attempts, + ) ) document_model_driven_method( - section, 'wait', operation_model, + section, + 'wait', + operation_model, event_emitter=event_emitter, method_description=wait_description, example_prefix='waiter.wait', include_input=botocore_waiter_params, document_output=False, - include_signature=include_signature + include_signature=include_signature, ) diff --git a/contrib/python/botocore/py3/botocore/endpoint.py b/contrib/python/botocore/py3/botocore/endpoint.py index 790d228d38..7814e5ac9b 100644 --- a/contrib/python/botocore/py3/botocore/endpoint.py +++ b/contrib/python/botocore/py3/botocore/endpoint.py @@ -32,7 +32,6 @@ from botocore.utils import ( is_valid_endpoint_url, is_valid_ipv6_endpoint_url, ) -from botocore.compat import six logger = logging.getLogger(__name__) history_recorder = get_global_history_recorder() @@ -61,7 +60,7 @@ def convert_to_response_dict(http_response, operation_model): 'status_code': http_response.status_code, 'context': { 'operation_name': operation_model.name, - } + }, } if response_dict['status_code'] >= 300: response_dict['body'] = http_response.content @@ -75,7 +74,7 @@ def convert_to_response_dict(http_response, operation_model): return response_dict -class Endpoint(object): +class Endpoint: """ Represents an endpoint for a particular service in a specific region. Only an endpoint can make requests. @@ -85,8 +84,15 @@ class Endpoint(object): :ivar host: The fully qualified endpoint hostname. :ivar session: The session object. 
""" - def __init__(self, host, endpoint_prefix, event_emitter, - response_parser_factory=None, http_session=None): + + def __init__( + self, + host, + endpoint_prefix, + event_emitter, + response_parser_factory=None, + http_session=None, + ): self._endpoint_prefix = endpoint_prefix self._event_emitter = event_emitter self.host = host @@ -99,49 +105,60 @@ class Endpoint(object): self.http_session = URLLib3Session() def __repr__(self): - return '%s(%s)' % (self._endpoint_prefix, self.host) + return f'{self._endpoint_prefix}({self.host})' def make_request(self, operation_model, request_dict): - logger.debug("Making request for %s with params: %s", - operation_model, request_dict) + logger.debug( + "Making request for %s with params: %s", + operation_model, + request_dict, + ) return self._send_request(request_dict, operation_model) def create_request(self, params, operation_model=None): request = create_request_object(params) if operation_model: - request.stream_output = any([ - operation_model.has_streaming_output, - operation_model.has_event_stream_output - ]) + request.stream_output = any( + [ + operation_model.has_streaming_output, + operation_model.has_event_stream_output, + ] + ) service_id = operation_model.service_model.service_id.hyphenize() event_name = 'request-created.{service_id}.{op_name}'.format( - service_id=service_id, - op_name=operation_model.name) - self._event_emitter.emit(event_name, request=request, - operation_name=operation_model.name) + service_id=service_id, op_name=operation_model.name + ) + self._event_emitter.emit( + event_name, + request=request, + operation_name=operation_model.name, + ) prepared_request = self.prepare_request(request) return prepared_request def _encode_headers(self, headers): # In place encoding of headers to utf-8 if they are unicode. 
for key, value in headers.items(): - if isinstance(value, six.text_type): + if isinstance(value, str): headers[key] = value.encode('utf-8') def prepare_request(self, request): self._encode_headers(request.headers) return request.prepare() - def _calculate_ttl(self, response_received_timestamp, date_header, - read_timeout): + def _calculate_ttl( + self, response_received_timestamp, date_header, read_timeout + ): local_timestamp = datetime.datetime.utcnow() date_conversion = datetime.datetime.strptime( - date_header, - "%a, %d %b %Y %H:%M:%S %Z" + date_header, "%a, %d %b %Y %H:%M:%S %Z" ) estimated_skew = date_conversion - response_received_timestamp - ttl = local_timestamp + datetime.timedelta( - seconds=read_timeout) + estimated_skew + ttl = ( + local_timestamp + + datetime.timedelta(seconds=read_timeout) + + estimated_skew + ) return ttl.strftime('%Y%m%dT%H%M%SZ') def _set_ttl(self, retries_context, read_timeout, success_response): @@ -153,17 +170,15 @@ class Endpoint(object): retries_context['ttl'] = self._calculate_ttl( response_received_timestamp, response_date_header, - read_timeout + read_timeout, ) except Exception: logger.debug( "Exception received when updating retries context with TTL", - exc_info=True + exc_info=True, ) - def _update_retries_context( - self, context, attempt, success_response=None - ): + def _update_retries_context(self, context, attempt, success_response=None): retries_context = context.setdefault('retries', {}) retries_context['attempt'] = attempt if 'invocation-id' not in retries_context: @@ -179,28 +194,36 @@ class Endpoint(object): self._update_retries_context(context, attempts) request = self.create_request(request_dict, operation_model) success_response, exception = self._get_response( - request, operation_model, context) - while self._needs_retry(attempts, operation_model, request_dict, - success_response, exception): + request, operation_model, context + ) + while self._needs_retry( + attempts, + operation_model, + request_dict, + success_response, + exception, + ): attempts += 1 - self._update_retries_context( - context, attempts, success_response - ) + self._update_retries_context(context, attempts, success_response) # If there is a stream associated with the request, we need # to reset it before attempting to send the request again. # This will ensure that we resend the entire contents of the # body. request.reset_stream() # Create a new request when retried (including a new signature). - request = self.create_request( - request_dict, operation_model) + request = self.create_request(request_dict, operation_model) success_response, exception = self._get_response( - request, operation_model, context) - if success_response is not None and \ - 'ResponseMetadata' in success_response[1]: + request, operation_model, context + ) + if ( + success_response is not None + and 'ResponseMetadata' in success_response[1] + ): # We want to share num retries, not num attempts. total_retries = attempts - 1 - success_response[1]['ResponseMetadata']['RetryAttempts'] = total_retries + success_response[1]['ResponseMetadata'][ + 'RetryAttempts' + ] = total_retries if exception is not None: raise exception else: @@ -213,7 +236,8 @@ class Endpoint(object): # If an exception occurs then the success_response is None. # If no exception occurs then exception is None. 
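        # In other words, exactly one side of the returned pair is set:
        # ((http_response, parsed_response), None) on success, or
        # (None, caught_exception) on failure.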
success_response, exception = self._do_get_response( - request, operation_model, context) + request, operation_model, context + ) kwargs_to_emit = { 'response_dict': None, 'parsed_response': None, @@ -224,25 +248,30 @@ class Endpoint(object): http_response, parsed_response = success_response kwargs_to_emit['parsed_response'] = parsed_response kwargs_to_emit['response_dict'] = convert_to_response_dict( - http_response, operation_model) + http_response, operation_model + ) service_id = operation_model.service_model.service_id.hyphenize() self._event_emitter.emit( - 'response-received.%s.%s' % ( - service_id, operation_model.name), **kwargs_to_emit) + f"response-received.{service_id}.{operation_model.name}", + **kwargs_to_emit, + ) return success_response, exception def _do_get_response(self, request, operation_model, context): try: logger.debug("Sending http request: %s", request) - history_recorder.record('HTTP_REQUEST', { - 'method': request.method, - 'headers': request.headers, - 'streaming': operation_model.has_streaming_input, - 'url': request.url, - 'body': request.body - }) + history_recorder.record( + 'HTTP_REQUEST', + { + 'method': request.method, + 'headers': request.headers, + 'streaming': operation_model.has_streaming_input, + 'url': request.url, + 'body': request.body, + }, + ) service_id = operation_model.service_model.service_id.hyphenize() - event_name = 'before-send.%s.%s' % (service_id, operation_model.name) + event_name = f"before-send.{service_id}.{operation_model.name}" responses = self._event_emitter.emit(event_name, request=request) http_response = first_non_none_response(responses) if http_response is None: @@ -250,24 +279,32 @@ class Endpoint(object): except HTTPClientError as e: return (None, e) except Exception as e: - logger.debug("Exception received when sending HTTP request.", - exc_info=True) + logger.debug( + "Exception received when sending HTTP request.", exc_info=True + ) return (None, e) # This returns the http_response and the parsed_data. 
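        # For reference, response_dict as built by convert_to_response_dict
        # above has the shape (keys per that helper):
        #     {'headers': ..., 'status_code': ...,
        #      'context': {'operation_name': ...}, 'body': ...}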
- response_dict = convert_to_response_dict(http_response, operation_model) + response_dict = convert_to_response_dict( + http_response, operation_model + ) handle_checksum_body( - http_response, response_dict, context, operation_model, + http_response, + response_dict, + context, + operation_model, ) http_response_record_dict = response_dict.copy() - http_response_record_dict['streaming'] = \ - operation_model.has_streaming_output + http_response_record_dict[ + 'streaming' + ] = operation_model.has_streaming_output history_recorder.record('HTTP_RESPONSE', http_response_record_dict) protocol = operation_model.metadata['protocol'] parser = self._response_parser_factory.create_parser(protocol) parsed_response = parser.parse( - response_dict, operation_model.output_shape) + response_dict, operation_model.output_shape + ) # Do a second parsing pass to pick up on any modeled error fields # NOTE: Ideally, we would push this down into the parser classes but # they currently have no reference to the operation or service model @@ -275,15 +312,20 @@ class Endpoint(object): # output shape but we can't change that now if http_response.status_code >= 300: self._add_modeled_error_fields( - response_dict, parsed_response, - operation_model, parser, + response_dict, + parsed_response, + operation_model, + parser, ) history_recorder.record('PARSED_RESPONSE', parsed_response) return (http_response, parsed_response), None def _add_modeled_error_fields( - self, response_dict, parsed_response, - operation_model, parser, + self, + response_dict, + parsed_response, + operation_model, + parser, ): error_code = parsed_response.get("Error", {}).get("Code") if error_code is None: @@ -296,24 +338,35 @@ class Endpoint(object): # TODO: avoid naming conflicts with ResponseMetadata and Error parsed_response.update(modeled_parse) - def _needs_retry(self, attempts, operation_model, request_dict, - response=None, caught_exception=None): + def _needs_retry( + self, + attempts, + operation_model, + request_dict, + response=None, + caught_exception=None, + ): service_id = operation_model.service_model.service_id.hyphenize() - event_name = 'needs-retry.%s.%s' % ( - service_id, - operation_model.name) + event_name = f"needs-retry.{service_id}.{operation_model.name}" responses = self._event_emitter.emit( - event_name, response=response, endpoint=self, - operation=operation_model, attempts=attempts, - caught_exception=caught_exception, request_dict=request_dict) + event_name, + response=response, + endpoint=self, + operation=operation_model, + attempts=attempts, + caught_exception=caught_exception, + request_dict=request_dict, + ) handler_response = first_non_none_response(responses) if handler_response is None: return False else: # Request needs to be retried, and we need to sleep # for the specified number of times. 
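            # A 'needs-retry' handler returns either None (do not retry) or
            # the number of seconds to sleep before the next attempt; that
            # sleep time is what gets logged below.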
- logger.debug("Response received to retry, sleeping for " - "%s seconds", handler_response) + logger.debug( + "Response received to retry, sleeping for %s seconds", + handler_response, + ) time.sleep(handler_response) return True @@ -321,21 +374,28 @@ class Endpoint(object): return self.http_session.send(request) -class EndpointCreator(object): +class EndpointCreator: def __init__(self, event_emitter): self._event_emitter = event_emitter def create_endpoint( - self, service_model, region_name, endpoint_url, - verify=None, response_parser_factory=None, - timeout=DEFAULT_TIMEOUT, max_pool_connections=MAX_POOL_CONNECTIONS, - http_session_cls=URLLib3Session, proxies=None, socket_options=None, - client_cert=None, proxies_config=None + self, + service_model, + region_name, + endpoint_url, + verify=None, + response_parser_factory=None, + timeout=DEFAULT_TIMEOUT, + max_pool_connections=MAX_POOL_CONNECTIONS, + http_session_cls=URLLib3Session, + proxies=None, + socket_options=None, + client_cert=None, + proxies_config=None, ): - if ( - not is_valid_endpoint_url(endpoint_url) - and not is_valid_ipv6_endpoint_url(endpoint_url) - ): + if not is_valid_endpoint_url( + endpoint_url + ) and not is_valid_ipv6_endpoint_url(endpoint_url): raise ValueError("Invalid endpoint: %s" % endpoint_url) if proxies is None: @@ -350,7 +410,7 @@ class EndpointCreator(object): max_pool_connections=max_pool_connections, socket_options=socket_options, client_cert=client_cert, - proxies_config=proxies_config + proxies_config=proxies_config, ) return Endpoint( @@ -358,7 +418,7 @@ class EndpointCreator(object): endpoint_prefix=endpoint_prefix, event_emitter=self._event_emitter, response_parser_factory=response_parser_factory, - http_session=http_session + http_session=http_session, ) def _get_proxies(self, url): diff --git a/contrib/python/botocore/py3/botocore/errorfactory.py b/contrib/python/botocore/py3/botocore/errorfactory.py index b192a66ded..d9a1e9cd9c 100644 --- a/contrib/python/botocore/py3/botocore/errorfactory.py +++ b/contrib/python/botocore/py3/botocore/errorfactory.py @@ -14,7 +14,7 @@ from botocore.exceptions import ClientError from botocore.utils import get_service_module_name -class BaseClientExceptions(object): +class BaseClientExceptions: ClientError = ClientError def __init__(self, code_to_exception): @@ -45,15 +45,16 @@ class BaseClientExceptions(object): def __getattr__(self, name): exception_cls_names = [ - exception_cls.__name__ for exception_cls - in self._code_to_exception.values() + exception_cls.__name__ + for exception_cls in self._code_to_exception.values() ] raise AttributeError( - '%r object has no attribute %r. Valid exceptions are: %s' % ( - self, name, ', '.join(exception_cls_names))) + fr"{self} object has no attribute {name}. 
" + fr"Valid exceptions are: {', '.join(exception_cls_names)}" + ) -class ClientExceptionsFactory(object): +class ClientExceptionsFactory: def __init__(self): self._client_exceptions_cache = {} @@ -84,5 +85,6 @@ class ClientExceptionsFactory(object): code_to_exception[code] = exception_cls cls_name = str(get_service_module_name(service_model) + 'Exceptions') client_exceptions_cls = type( - cls_name, (BaseClientExceptions,), cls_props) + cls_name, (BaseClientExceptions,), cls_props + ) return client_exceptions_cls(code_to_exception) diff --git a/contrib/python/botocore/py3/botocore/eventstream.py b/contrib/python/botocore/py3/botocore/eventstream.py index 89eb5874a6..e71bfa0496 100644 --- a/contrib/python/botocore/py3/botocore/eventstream.py +++ b/contrib/python/botocore/py3/botocore/eventstream.py @@ -20,46 +20,56 @@ from botocore.exceptions import EventStreamError # byte length of the prelude (total_length + header_length + prelude_crc) _PRELUDE_LENGTH = 12 _MAX_HEADERS_LENGTH = 128 * 1024 # 128 Kb -_MAX_PAYLOAD_LENGTH = 16 * 1024 ** 2 # 16 Mb +_MAX_PAYLOAD_LENGTH = 16 * 1024**2 # 16 Mb class ParserError(Exception): - """Base binary flow encoding parsing exception. """ + """Base binary flow encoding parsing exception.""" + pass class DuplicateHeader(ParserError): - """Duplicate header found in the event. """ + """Duplicate header found in the event.""" + def __init__(self, header): message = 'Duplicate header present: "%s"' % header - super(DuplicateHeader, self).__init__(message) + super().__init__(message) class InvalidHeadersLength(ParserError): - """Headers length is longer than the maximum. """ + """Headers length is longer than the maximum.""" + def __init__(self, length): - message = 'Header length of %s exceeded the maximum of %s' % ( - length, _MAX_HEADERS_LENGTH + message = 'Header length of {} exceeded the maximum of {}'.format( + length, + _MAX_HEADERS_LENGTH, ) - super(InvalidHeadersLength, self).__init__(message) + super().__init__(message) class InvalidPayloadLength(ParserError): - """Payload length is longer than the maximum. """ + """Payload length is longer than the maximum.""" + def __init__(self, length): - message = 'Payload length of %s exceeded the maximum of %s' % ( - length, _MAX_PAYLOAD_LENGTH + message = 'Payload length of {} exceeded the maximum of {}'.format( + length, + _MAX_PAYLOAD_LENGTH, ) - super(InvalidPayloadLength, self).__init__(message) + super().__init__(message) class ChecksumMismatch(ParserError): - """Calculated checksum did not match the expected checksum. """ + """Calculated checksum did not match the expected checksum.""" + def __init__(self, expected, calculated): - message = 'Checksum mismatch: expected 0x%08x, calculated 0x%08x' % ( - expected, calculated + message = ( + 'Checksum mismatch: expected 0x{:08x}, calculated 0x{:08x}'.format( + expected, + calculated, + ) ) - super(ChecksumMismatch, self).__init__(message) + super().__init__(message) class NoInitialResponseError(ParserError): @@ -68,12 +78,13 @@ class NoInitialResponseError(ParserError): This exception is raised when the event stream produced no events or the first event in the stream was not of the initial-response type. """ + def __init__(self): message = 'First event was not of the initial-response type' - super(NoInitialResponseError, self).__init__(message) + super().__init__(message) -class DecodeUtils(object): +class DecodeUtils: """Unpacking utility functions used in the decoder. 
All methods on this class take raw bytes and return a tuple containing @@ -250,7 +261,8 @@ class DecodeUtils(object): :returns: A tuple containing the (utf-8 string, bytes consumed). """ array_bytes, consumed = DecodeUtils.unpack_byte_array( - data, length_byte_size) + data, length_byte_size + ) return array_bytes.decode('utf-8'), consumed @staticmethod @@ -288,8 +300,9 @@ def _validate_checksum(data, checksum, crc=0): raise ChecksumMismatch(checksum, computed_checksum) -class MessagePrelude(object): - """Represents the prelude of an event stream message. """ +class MessagePrelude: + """Represents the prelude of an event stream message.""" + def __init__(self, total_length, headers_length, crc): self.total_length = total_length self.headers_length = headers_length @@ -329,8 +342,9 @@ class MessagePrelude(object): return _PRELUDE_LENGTH + self.headers_length -class EventStreamMessage(object): - """Represents an event stream message. """ +class EventStreamMessage: + """Represents an event stream message.""" + def __init__(self, prelude, headers, payload, crc): self.prelude = prelude self.headers = headers @@ -344,12 +358,12 @@ class EventStreamMessage(object): return { 'status_code': status_code, 'headers': self.headers, - 'body': self.payload + 'body': self.payload, } -class EventStreamHeaderParser(object): - """ Parses the event headers from an event stream message. +class EventStreamHeaderParser: + """Parses the event headers from an event stream message. Expects all of the header data upfront and creates a dictionary of headers to return. This object can be reused multiple times to parse the headers @@ -432,7 +446,7 @@ class EventStreamHeaderParser(object): self._data = self._data[consumed:] -class EventStreamBuffer(object): +class EventStreamBuffer: """Streaming based event stream buffer A buffer class that wraps bytes from an event stream providing parsed @@ -465,27 +479,29 @@ class EventStreamBuffer(object): prelude = MessagePrelude(*raw_prelude) self._validate_prelude(prelude) # The minus 4 removes the prelude crc from the bytes to be checked - _validate_checksum(prelude_bytes[:_PRELUDE_LENGTH - 4], prelude.crc) + _validate_checksum(prelude_bytes[: _PRELUDE_LENGTH - 4], prelude.crc) return prelude def _parse_headers(self): - header_bytes = self._data[_PRELUDE_LENGTH:self._prelude.headers_end] + header_bytes = self._data[_PRELUDE_LENGTH : self._prelude.headers_end] return self._header_parser.parse(header_bytes) def _parse_payload(self): prelude = self._prelude - payload_bytes = self._data[prelude.headers_end:prelude.payload_end] + payload_bytes = self._data[prelude.headers_end : prelude.payload_end] return payload_bytes def _parse_message_crc(self): prelude = self._prelude - crc_bytes = self._data[prelude.payload_end:prelude.total_length] + crc_bytes = self._data[prelude.payload_end : prelude.total_length] message_crc, _ = DecodeUtils.unpack_uint32(crc_bytes) return message_crc def _parse_message_bytes(self): # The minus 4 includes the prelude crc to the bytes to be checked - message_bytes = self._data[_PRELUDE_LENGTH - 4:self._prelude.payload_end] + message_bytes = self._data[ + _PRELUDE_LENGTH - 4 : self._prelude.payload_end + ] return message_bytes def _validate_message_crc(self): @@ -504,7 +520,7 @@ class EventStreamBuffer(object): def _prepare_for_next_message(self): # Advance the data and reset the current prelude - self._data = self._data[self._prelude.total_length:] + self._data = self._data[self._prelude.total_length :] self._prelude = None def next(self): @@ -531,7 +547,7 @@ 
class EventStreamBuffer(object): return self -class EventStream(object): +class EventStream: """Wrapper class for an event stream body. This wraps the underlying streaming body, parsing it for individual events @@ -574,6 +590,7 @@ class EventStream(object): if not end_event_received: raise Exception("End event not received, request incomplete.") """ + def __init__(self, raw_stream, output_shape, parser, operation_name): self._raw_stream = raw_stream self._output_shape = output_shape @@ -591,8 +608,7 @@ class EventStream(object): event_stream_buffer = EventStreamBuffer() for chunk in self._raw_stream.stream(): event_stream_buffer.add_data(chunk) - for event in event_stream_buffer: - yield event + yield from event_stream_buffer def _parse_event(self, event): response_dict = event.to_response_dict() @@ -613,5 +629,5 @@ class EventStream(object): raise NoInitialResponseError() def close(self): - """Closes the underlying streaming body. """ + """Closes the underlying streaming body.""" self._raw_stream.close() diff --git a/contrib/python/botocore/py3/botocore/exceptions.py b/contrib/python/botocore/py3/botocore/exceptions.py index 700a1ff673..089add9e61 100644 --- a/contrib/python/botocore/py3/botocore/exceptions.py +++ b/contrib/python/botocore/py3/botocore/exceptions.py @@ -11,7 +11,6 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from __future__ import unicode_literals from botocore.vendored import requests from botocore.vendored.requests.packages import urllib3 @@ -35,6 +34,7 @@ class BotoCoreError(Exception): :ivar msg: The descriptive message associated with the error. """ + fmt = 'An unspecified error occurred' def __init__(self, **kwargs): @@ -52,6 +52,7 @@ class DataNotFoundError(BotoCoreError): :ivar data_path: The data path that the user attempted to load. """ + fmt = 'Unable to load data for: {data_path}' @@ -61,9 +62,11 @@ class UnknownServiceError(DataNotFoundError): :ivar service_name: The name of the unknown service. """ + fmt = ( "Unknown service: '{service_name}'. Valid service names are: " - "{known_service_names}") + "{known_service_names}" + ) class UnknownRegionError(BotoCoreError): @@ -72,9 +75,8 @@ class UnknownRegionError(BotoCoreError): :ivar region_name: The name of the unknown region. """ - fmt = ( - "Unknown region: '{region_name}'. {error_msg}" - ) + + fmt = "Unknown region: '{region_name}'. {error_msg}" class ApiVersionNotFoundError(BotoCoreError): @@ -85,6 +87,7 @@ class ApiVersionNotFoundError(BotoCoreError): :ivar data_path: The data path that the user attempted to load. :ivar api_version: The API version that the user attempted to load. 
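    A minimal sketch, assuming the standard BotoCoreError behavior of
    filling ``fmt`` from keyword arguments (values illustrative):

        raise ApiVersionNotFoundError(
            data_path='ec2/paginators-1', api_version='2010-01-01'
        )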
""" + fmt = 'Unable to load data {data_path} for: {api_version}' @@ -94,11 +97,14 @@ class HTTPClientError(BotoCoreError): def __init__(self, request=None, response=None, **kwargs): self.request = request self.response = response - super(HTTPClientError, self).__init__(**kwargs) + super().__init__(**kwargs) def __reduce__(self): return _exception_from_packed_args, ( - self.__class__, (self.request, self.response), self.kwargs) + self.__class__, + (self.request, self.response), + self.kwargs, + ) class ConnectionError(BotoCoreError): @@ -127,11 +133,15 @@ class SSLError(ConnectionError, requests.exceptions.SSLError): class ConnectionClosedError(HTTPClientError): fmt = ( 'Connection was closed before we received a valid response ' - 'from endpoint URL: "{endpoint_url}".') + 'from endpoint URL: "{endpoint_url}".' + ) -class ReadTimeoutError(HTTPClientError, requests.exceptions.ReadTimeout, - urllib3.exceptions.ReadTimeoutError): +class ReadTimeoutError( + HTTPClientError, + requests.exceptions.ReadTimeout, + urllib3.exceptions.ReadTimeoutError, +): fmt = 'Read timeout on endpoint URL: "{endpoint_url}"' @@ -151,6 +161,7 @@ class NoCredentialsError(BotoCoreError): """ No credentials could be found. """ + fmt = 'Unable to locate credentials' @@ -161,6 +172,7 @@ class PartialCredentialsError(BotoCoreError): :ivar cred_var: The missing credential variable name. """ + fmt = 'Partial credentials found in {provider}, missing: {cred_var}' @@ -173,6 +185,7 @@ class CredentialRetrievalError(BotoCoreError): retrieved. """ + fmt = 'Error when retrieving credentials from {provider}: {error_msg}' @@ -182,6 +195,7 @@ class UnknownSignatureVersionError(BotoCoreError): :ivar signature_version: The name of the requested signature version. """ + fmt = 'Unknown Signature Version: {signature_version}.' @@ -192,6 +206,7 @@ class ServiceNotInRegionError(BotoCoreError): :ivar service_name: The name of the service. :ivar region_name: The name of the region. """ + fmt = 'Service {service_name} not available in region {region_name}' @@ -207,6 +222,7 @@ class BaseEndpointResolverError(BotoCoreError): class NoRegionError(BaseEndpointResolverError): """No region was specified.""" + fmt = 'You must specify a region.' @@ -219,8 +235,10 @@ class EndpointVariantError(BaseEndpointResolverError): """ - fmt = ('Unable to construct a modeled endpoint with the following ' - 'variant(s) {tags}: ') + fmt = ( + 'Unable to construct a modeled endpoint with the following ' + 'variant(s) {tags}: ' + ) class UnknownEndpointError(BaseEndpointResolverError, ValueError): @@ -230,9 +248,11 @@ class UnknownEndpointError(BaseEndpointResolverError, ValueError): :ivar service_name: The name of the service. :ivar region_name: The name of the region. """ + fmt = ( 'Unable to construct an endpoint for ' - '{service_name} in region {region_name}') + '{service_name} in region {region_name}' + ) class UnknownFIPSEndpointError(BaseEndpointResolverError): @@ -242,6 +262,7 @@ class UnknownFIPSEndpointError(BaseEndpointResolverError): :ivar service_name: The name of the service. :ivar region_name: The name of the region. """ + fmt = ( 'The provided FIPS pseudo-region "{region_name}" is not known for ' 'the service "{service_name}". A FIPS compliant endpoint cannot be ' @@ -256,6 +277,7 @@ class ProfileNotFound(BotoCoreError): :ivar profile: The name of the profile the user attempted to load. 
""" + fmt = 'The config profile ({profile}) could not be found' @@ -265,6 +287,7 @@ class ConfigParseError(BotoCoreError): :ivar path: The path to the configuration file. """ + fmt = 'Unable to parse config file: {path}' @@ -274,6 +297,7 @@ class ConfigNotFound(BotoCoreError): :ivar path: The path to the configuration file. """ + fmt = 'The specified config file ({path}) could not be found.' @@ -288,8 +312,11 @@ class MissingParametersError(BotoCoreError): other than str(). :ivar missing: The names of the missing parameters. """ - fmt = ('The following required parameters are missing for ' - '{object_name}: {missing}') + + fmt = ( + 'The following required parameters are missing for ' + '{object_name}: {missing}' + ) class ValidationError(BotoCoreError): @@ -303,8 +330,8 @@ class ValidationError(BotoCoreError): :ivar param: The parameter that failed validation. :ivar type_name: The name of the underlying type. """ - fmt = ("Invalid value ('{value}') for param {param} " - "of type {type_name} ") + + fmt = "Invalid value ('{value}') for param {param} " "of type {type_name} " class ParamValidationError(BotoCoreError): @@ -322,8 +349,11 @@ class UnknownKeyError(ValidationError): :ivar param: The name of the parameter. :ivar choices: The valid choices the value can be. """ - fmt = ("Unknown key '{value}' for param '{param}'. Must be one " - "of: {choices}") + + fmt = ( + "Unknown key '{value}' for param '{param}'. Must be one " + "of: {choices}" + ) class RangeError(ValidationError): @@ -335,8 +365,11 @@ class RangeError(ValidationError): :ivar min_value: The specified minimum value. :ivar max_value: The specified maximum value. """ - fmt = ('Value out of range for param {param}: ' - '{min_value} <= {value} <= {max_value}') + + fmt = ( + 'Value out of range for param {param}: ' + '{min_value} <= {value} <= {max_value}' + ) class UnknownParameterError(ValidationError): @@ -347,6 +380,7 @@ class UnknownParameterError(ValidationError): :ivar operation: The name of the operation. :ivar choices: The valid choices the parameter name can be. """ + fmt = ( "Unknown parameter '{name}' for operation {operation}. Must be one " "of: {choices}" @@ -359,9 +393,8 @@ class InvalidRegionError(ValidationError, ValueError): :ivar region_name: region_name that was being validated. """ - fmt = ( - "Provided region_name '{region_name}' doesn't match a supported format." - ) + + fmt = "Provided region_name '{region_name}' doesn't match a supported format." class AliasConflictParameterError(ValidationError): @@ -372,6 +405,7 @@ class AliasConflictParameterError(ValidationError): :ivar alias: The name of the alias :ivar operation: The name of the operation. """ + fmt = ( "Parameter '{original}' and its alias '{alias}' were provided " "for operation {operation}. Only one of them may be used." @@ -384,6 +418,7 @@ class UnknownServiceStyle(BotoCoreError): :ivar service_style: The style requested. """ + fmt = 'The service style ({service_style}) is not understood.' @@ -396,66 +431,77 @@ class OperationNotPageableError(BotoCoreError): class ChecksumError(BotoCoreError): - """The expected checksum did not match the calculated checksum. + """The expected checksum did not match the calculated checksum.""" - """ - fmt = ('Checksum {checksum_type} failed, expected checksum ' - '{expected_checksum} did not match calculated checksum ' - '{actual_checksum}.') + fmt = ( + 'Checksum {checksum_type} failed, expected checksum ' + '{expected_checksum} did not match calculated checksum ' + '{actual_checksum}.' 
+ ) class UnseekableStreamError(BotoCoreError): - """Need to seek a stream, but stream does not support seeking. + """Need to seek a stream, but stream does not support seeking.""" - """ - fmt = ('Need to rewind the stream {stream_object}, but stream ' - 'is not seekable.') + fmt = ( + 'Need to rewind the stream {stream_object}, but stream ' + 'is not seekable.' + ) class WaiterError(BotoCoreError): """Waiter failed to reach desired state.""" + fmt = 'Waiter {name} failed: {reason}' def __init__(self, name, reason, last_response): - super(WaiterError, self).__init__(name=name, reason=reason) + super().__init__(name=name, reason=reason) self.last_response = last_response class IncompleteReadError(BotoCoreError): """HTTP response did not return expected number of bytes.""" - fmt = ('{actual_bytes} read, but total bytes ' - 'expected is {expected_bytes}.') + + fmt = ( + '{actual_bytes} read, but total bytes ' 'expected is {expected_bytes}.' + ) class InvalidExpressionError(BotoCoreError): """Expression is either invalid or too complex.""" + fmt = 'Invalid expression {expression}: Only dotted lookups are supported.' class UnknownCredentialError(BotoCoreError): """Tried to insert before/after an unregistered credential type.""" + fmt = 'Credential named {name} not found.' class WaiterConfigError(BotoCoreError): """Error when processing waiter configuration.""" + fmt = 'Error processing waiter config: {error_msg}' class UnknownClientMethodError(BotoCoreError): """Error when trying to access a method on a client that does not exist.""" + fmt = 'Client does not have method: {method_name}' class UnsupportedSignatureVersionError(BotoCoreError): """Error when trying to use an unsupported Signature Version.""" + fmt = 'Signature version is not supported: {signature_version}' class ClientError(Exception): MSG_TEMPLATE = ( 'An error occurred ({error_code}) when calling the {operation_name} ' - 'operation{retry_info}: {error_message}') + 'operation{retry_info}: {error_message}' + ) def __init__(self, error_response, operation_name): retry_info = self._get_retry_info(error_response) @@ -466,7 +512,7 @@ class ClientError(Exception): operation_name=operation_name, retry_info=retry_info, ) - super(ClientError, self).__init__(msg) + super().__init__(msg) self.response = error_response self.operation_name = operation_name @@ -476,8 +522,9 @@ class ClientError(Exception): metadata = response['ResponseMetadata'] if metadata.get('MaxAttemptsReached', False): if 'RetryAttempts' in metadata: - retry_info = (' (reached max retries: %s)' % - metadata['RetryAttempts']) + retry_info = ( + f" (reached max retries: {metadata['RetryAttempts']})" + ) return retry_info def __reduce__(self): @@ -493,6 +540,7 @@ class EventStreamError(ClientError): class UnsupportedTLSVersionWarning(Warning): """Warn when an openssl version that uses TLS 1.2 is required""" + pass @@ -502,6 +550,7 @@ class ImminentRemovalWarning(Warning): class InvalidDNSNameError(BotoCoreError): """Error when virtual host path is forced on a non-DNS compatible bucket""" + fmt = ( 'Bucket named {bucket_name} is not DNS compatible. Virtual ' 'hosted-style addressing cannot be used. The addressing style ' @@ -513,6 +562,7 @@ class InvalidDNSNameError(BotoCoreError): class InvalidS3AddressingStyleError(BotoCoreError): """Error when an invalid path style is specified""" + fmt = ( 'S3 addressing style {s3_addressing_style} is invalid. 
Valid options ' 'are: \'auto\', \'virtual\', and \'path\'' @@ -521,6 +571,7 @@ class InvalidS3AddressingStyleError(BotoCoreError): class UnsupportedS3ArnError(BotoCoreError): """Error when S3 ARN provided to Bucket parameter is not supported""" + fmt = ( 'S3 ARN {arn} provided to "Bucket" parameter is invalid. Only ' 'ARNs for S3 access-points are supported.' @@ -529,13 +580,13 @@ class UnsupportedS3ArnError(BotoCoreError): class UnsupportedS3ControlArnError(BotoCoreError): """Error when S3 ARN provided to S3 control parameter is not supported""" - fmt = ( - 'S3 ARN "{arn}" provided is invalid for this operation. {msg}' - ) + + fmt = 'S3 ARN "{arn}" provided is invalid for this operation. {msg}' class InvalidHostLabelError(BotoCoreError): """Error when an invalid host label would be bound to an endpoint""" + fmt = ( 'Invalid host label to be bound to the hostname of the endpoint: ' '"{label}".' @@ -544,6 +595,7 @@ class InvalidHostLabelError(BotoCoreError): class UnsupportedOutpostResourceError(BotoCoreError): """Error when S3 Outpost ARN provided to Bucket parameter is incomplete""" + fmt = ( 'S3 Outpost ARN resource "{resource_name}" provided to "Bucket" ' 'parameter is invalid. Only ARNs for S3 Outpost arns with an ' @@ -553,20 +605,19 @@ class UnsupportedOutpostResourceError(BotoCoreError): class UnsupportedS3ConfigurationError(BotoCoreError): """Error when an unsupported configuration is used with access-points""" - fmt = ( - 'Unsupported configuration when using S3: {msg}' - ) + + fmt = 'Unsupported configuration when using S3: {msg}' class UnsupportedS3AccesspointConfigurationError(BotoCoreError): """Error when an unsupported configuration is used with access-points""" - fmt = ( - 'Unsupported configuration when using S3 access-points: {msg}' - ) + + fmt = 'Unsupported configuration when using S3 access-points: {msg}' class InvalidEndpointDiscoveryConfigurationError(BotoCoreError): """Error when invalid value supplied for endpoint_discovery_enabled""" + fmt = ( 'Unsupported configuration value for endpoint_discovery_enabled. ' 'Expected one of ("true", "false", "auto") but got {config_value}.' @@ -575,13 +626,13 @@ class InvalidEndpointDiscoveryConfigurationError(BotoCoreError): class UnsupportedS3ControlConfigurationError(BotoCoreError): """Error when an unsupported configuration is used with S3 Control""" - fmt = ( - 'Unsupported configuration when using S3 Control: {msg}' - ) + + fmt = 'Unsupported configuration when using S3 Control: {msg}' class InvalidRetryConfigurationError(BotoCoreError): """Error when invalid retry configuration is specified""" + fmt = ( 'Cannot provide retry configuration for "{retry_config_option}". ' 'Valid retry configuration options are: \'max_attempts\'' @@ -590,6 +641,7 @@ class InvalidRetryConfigurationError(BotoCoreError): class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError): """Error when invalid retry configuration is specified""" + fmt = ( 'Value provided to "max_attempts": {provided_max_attempts} must ' 'be an integer greater than or equal to {min_value}.' 
@@ -598,6 +650,7 @@ class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError): class InvalidRetryModeError(InvalidRetryConfigurationError): """Error when invalid retry mode configuration is specified""" + fmt = ( 'Invalid value provided to "mode": "{provided_retry_mode}" must ' 'be one of: "legacy", "standard", "adaptive"' @@ -606,6 +659,7 @@ class InvalidRetryModeError(InvalidRetryConfigurationError): class InvalidS3UsEast1RegionalEndpointConfigError(BotoCoreError): """Error for invalid s3 us-east-1 regional endpoints configuration""" + fmt = ( 'S3 us-east-1 regional endpoint option ' '{s3_us_east_1_regional_endpoint_config} is ' @@ -615,6 +669,7 @@ class InvalidS3UsEast1RegionalEndpointConfigError(BotoCoreError): class InvalidSTSRegionalEndpointsConfigError(BotoCoreError): """Error when invalid sts regional endpoints configuration is specified""" + fmt = ( 'STS regional endpoints option {sts_regional_endpoints_config} is ' 'invalid. Valid options are: "legacy", "regional"' @@ -622,7 +677,9 @@ class InvalidSTSRegionalEndpointsConfigError(BotoCoreError): class StubResponseError(BotoCoreError): - fmt = 'Error getting response stub for operation {operation_name}: {reason}' + fmt = ( + 'Error getting response stub for operation {operation_name}: {reason}' + ) class StubAssertionError(StubResponseError, AssertionError): @@ -694,15 +751,11 @@ class UnauthorizedSSOTokenError(SSOError): class CapacityNotAvailableError(BotoCoreError): - fmt = ( - 'Insufficient request capacity available.' - ) + fmt = 'Insufficient request capacity available.' class InvalidProxiesConfigError(BotoCoreError): - fmt = ( - 'Invalid configuration value(s) provided for proxies_config.' - ) + fmt = 'Invalid configuration value(s) provided for proxies_config.' class InvalidDefaultsMode(BotoCoreError): @@ -721,6 +774,4 @@ class FlexibleChecksumError(BotoCoreError): class InvalidEndpointConfigurationError(BotoCoreError): - fmt = ( - 'Invalid endpoint configuration: {msg}' - ) + fmt = 'Invalid endpoint configuration: {msg}' diff --git a/contrib/python/botocore/py3/botocore/handlers.py b/contrib/python/botocore/py3/botocore/handlers.py index 2c771ca62c..b5b8661922 100644 --- a/contrib/python/botocore/py3/botocore/handlers.py +++ b/contrib/python/botocore/py3/botocore/handlers.py @@ -65,11 +65,11 @@ from botocore.utils import ( ) # Keep these imported. There's pre-existing code that uses them. 
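# For example (illustrative), pre-existing code doing
#     from botocore.handlers import retryhandler, translate
# keeps working because of these re-exports; the `# noqa` markers stop
# linters from flagging them as unused imports.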
-from botocore import retryhandler # noqa -from botocore import translate # noqa -from botocore.compat import MD5_AVAILABLE # noqa -from botocore.exceptions import MissingServiceIdError # noqa -from botocore.utils import hyphenize_service_id # noqa +from botocore import retryhandler # noqa +from botocore import translate # noqa +from botocore.compat import MD5_AVAILABLE # noqa +from botocore.exceptions import MissingServiceIdError # noqa +from botocore.utils import hyphenize_service_id # noqa logger = logging.getLogger(__name__) @@ -93,9 +93,7 @@ _OUTPOST_ARN = ( VALID_S3_ARN = re.compile('|'.join([_ACCESSPOINT_ARN, _OUTPOST_ARN])) VERSION_ID_SUFFIX = re.compile(r'\?versionId=[^\s]+$') -SERVICE_NAME_ALIASES = { - 'runtime.sagemaker': 'sagemaker-runtime' -} +SERVICE_NAME_ALIASES = {'runtime.sagemaker': 'sagemaker-runtime'} def handle_service_name_alias(service_name, **kwargs): @@ -149,9 +147,12 @@ def check_for_200_error(response, **kwargs): return http_response, parsed = response if _looks_like_special_case_error(http_response): - logger.debug("Error found for response with 200 status code, " - "errors: %s, changing status code to " - "500.", parsed) + logger.debug( + "Error found for response with 200 status code, " + "errors: %s, changing status code to " + "500.", + parsed, + ) http_response.status_code = 500 @@ -159,8 +160,8 @@ def _looks_like_special_case_error(http_response): if http_response.status_code == 200: try: parser = ETree.XMLParser( - target=ETree.TreeBuilder(), - encoding='utf-8') + target=ETree.TreeBuilder(), encoding='utf-8' + ) parser.feed(http_response.content) root = parser.close() except XMLParseError: @@ -174,7 +175,7 @@ def _looks_like_special_case_error(http_response): def set_operation_specific_signer(context, signing_name, **kwargs): - """ Choose the operation-specific signer. + """Choose the operation-specific signer. Individual operations may have a different auth type than the service as a whole. This will most often manifest as operations that should not be @@ -196,10 +197,7 @@ def set_operation_specific_signer(context, signing_name, **kwargs): if auth_type == 'v4a': # If sigv4a is chosen, we must add additional # signing config for global signature. - signing = { - 'region': '*', - 'signing_name': signing_name - } + signing = {'region': '*', 'signing_name': signing_name} if 'signing' in context: context['signing'].update(signing) else: @@ -229,7 +227,8 @@ def decode_console_output(parsed, **kwargs): # possible that console output contains non string # chars we can't utf-8 decode. value = base64.b64decode(six.b(parsed['Output'])).decode( - 'utf-8', 'replace') + 'utf-8', 'replace' + ) parsed['Output'] = value except (ValueError, TypeError, AttributeError): logger.debug('Error decoding base64', exc_info=True) @@ -239,8 +238,10 @@ def generate_idempotent_uuid(params, model, **kwargs): for name in model.idempotent_members: if name not in params: params[name] = str(uuid.uuid4()) - logger.debug("injecting idempotency token (%s) into param '%s'." % - (params[name], name)) + logger.debug( + "injecting idempotency token (%s) into param '%s'." 
+ % (params[name], name) + ) def decode_quoted_jsondoc(value): @@ -255,7 +256,8 @@ def json_decode_template_body(parsed, **kwargs): if 'TemplateBody' in parsed: try: value = json.loads( - parsed['TemplateBody'], object_pairs_hook=OrderedDict) + parsed['TemplateBody'], object_pairs_hook=OrderedDict + ) parsed['TemplateBody'] = value except (ValueError, TypeError): logger.debug('error loading JSON', exc_info=True) @@ -267,9 +269,10 @@ def validate_bucket_name(params, **kwargs): bucket = params['Bucket'] if not VALID_BUCKET.search(bucket) and not VALID_S3_ARN.search(bucket): error_msg = ( - 'Invalid bucket name "%s": Bucket name must match ' - 'the regex "%s" or be an ARN matching the regex "%s"' % ( - bucket, VALID_BUCKET.pattern, VALID_S3_ARN.pattern)) + f'Invalid bucket name "{bucket}": Bucket name must match ' + f'the regex "{VALID_BUCKET.pattern}" or be an ARN matching ' + f'the regex "{VALID_S3_ARN.pattern}"' + ) raise ParamValidationError(report=error_msg) @@ -300,10 +303,11 @@ def _sse_md5(params, sse_member_prefix='SSECustomer'): sse_key_member = sse_member_prefix + 'Key' sse_md5_member = sse_member_prefix + 'KeyMD5' key_as_bytes = params[sse_key_member] - if isinstance(key_as_bytes, six.text_type): + if isinstance(key_as_bytes, str): key_as_bytes = key_as_bytes.encode('utf-8') - key_md5_str = base64.b64encode( - get_md5(key_as_bytes).digest()).decode('utf-8') + key_md5_str = base64.b64encode(get_md5(key_as_bytes).digest()).decode( + 'utf-8' + ) key_b64_encoded = base64.b64encode(key_as_bytes).decode('utf-8') params[sse_key_member] = key_b64_encoded params[sse_md5_member] = key_md5_str @@ -336,7 +340,7 @@ def add_expect_header(model, params, **kwargs): params['headers']['Expect'] = '100-continue' -class DeprecatedServiceDocumenter(object): +class DeprecatedServiceDocumenter: def __init__(self, replacement_service_name): self._replacement_service_name = replacement_service_name @@ -357,8 +361,10 @@ def document_copy_source_form(section, event_name, **kwargs): param_line = parent.get_section('CopySource') value_portion = param_line.get_section('member-value') value_portion.clear_text() - value_portion.write("'string' or {'Bucket': 'string', " - "'Key': 'string', 'VersionId': 'string'}") + value_portion.write( + "'string' or {'Bucket': 'string', " + "'Key': 'string', 'VersionId': 'string'}" + ) elif 'request-params' in event_name: param_section = section.get_section('CopySource') type_section = param_section.get_section('param-type') @@ -407,7 +413,7 @@ def handle_copy_source_param(params, **kwargs): # param validator take care of this. It will # give a better error message. 
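        # For reference (values illustrative), CopySource is accepted either
        # as a string such as 'mybucket/mykey' or as a dict such as
        # {'Bucket': 'mybucket', 'Key': 'mykey', 'VersionId': 'abc123'};
        # any other type falls through to the parameter validator.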
return - if isinstance(source, six.string_types): + if isinstance(source, str): params['CopySource'] = _quote_source_header(source) elif isinstance(source, dict): params['CopySource'] = _quote_source_header_from_dict(source) @@ -419,12 +425,13 @@ def _quote_source_header_from_dict(source_dict): key = source_dict['Key'] version_id = source_dict.get('VersionId') if VALID_S3_ARN.search(bucket): - final = '%s/object/%s' % (bucket, key) + final = f'{bucket}/object/{key}' else: - final = '%s/%s' % (bucket, key) + final = f'{bucket}/{key}' except KeyError as e: raise ParamValidationError( - report='Missing required parameter: %s' % str(e)) + report=f'Missing required parameter: {str(e)}' + ) final = percent_encode(final, safe=SAFE_CHARS + '/') if version_id is not None: final += '?versionId=%s' % version_id @@ -436,12 +443,13 @@ def _quote_source_header(value): if result is None: return percent_encode(value, safe=SAFE_CHARS + '/') else: - first, version_id = value[:result.start()], value[result.start():] + first, version_id = value[: result.start()], value[result.start() :] return percent_encode(first, safe=SAFE_CHARS + '/') + version_id -def _get_cross_region_presigned_url(request_signer, request_dict, model, - source_region, destination_region): +def _get_cross_region_presigned_url( + request_signer, request_dict, model, source_region, destination_region +): # The better way to do this is to actually get the # endpoint_resolver and get the endpoint_url given the # source region. In this specific case, we know that @@ -453,12 +461,13 @@ def _get_cross_region_presigned_url(request_signer, request_dict, model, request_dict_copy = copy.deepcopy(request_dict) request_dict_copy['body']['DestinationRegion'] = destination_region request_dict_copy['url'] = request_dict['url'].replace( - destination_region, source_region) + destination_region, source_region + ) request_dict_copy['method'] = 'GET' request_dict_copy['headers'] = {} return request_signer.generate_presigned_url( - request_dict_copy, region_name=source_region, - operation_name=model.name) + request_dict_copy, region_name=source_region, operation_name=model.name + ) def _get_presigned_url_source_and_destination_regions(request_signer, params): @@ -473,9 +482,11 @@ def inject_presigned_url_ec2(params, request_signer, model, **kwargs): if 'PresignedUrl' in params['body']: return src, dest = _get_presigned_url_source_and_destination_regions( - request_signer, params['body']) + request_signer, params['body'] + ) url = _get_cross_region_presigned_url( - request_signer, params, model, src, dest) + request_signer, params, model, src, dest + ) params['body']['PresignedUrl'] = url # EC2 Requires that the destination region be sent over the wire in # addition to the source region. @@ -490,7 +501,8 @@ def inject_presigned_url_rds(params, request_signer, model, **kwargs): return src, dest = _get_presigned_url_source_and_destination_regions( - request_signer, params['body']) + request_signer, params['body'] + ) # Since SourceRegion isn't actually modeled for RDS, it needs to be # removed from the request params before we send the actual request. 
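
# --- illustration, not part of the diff: a standalone sketch of the
# CopySource handling reformatted above. A dict source is flattened to
# "bucket/key[?versionId=...]" and percent-encoded; urllib.parse.quote is
# used here as a stand-in for botocore's percent_encode/SAFE_CHARS, and
# quote_copy_source is an invented name.
from urllib.parse import quote

def quote_copy_source(source):
    # ARN buckets use 'bucket/object/key' in the real handler above
    final = f"{source['Bucket']}/{source['Key']}"
    encoded = quote(final, safe='/-._~')  # approximates SAFE_CHARS + '/'
    version_id = source.get('VersionId')
    if version_id is not None:
        # the version id is appended after encoding, as in the real handler
        encoded += f'?versionId={version_id}'
    return encoded

print(quote_copy_source({'Bucket': 'mybucket', 'Key': 'my key.txt'}))
# mybucket/my%20key.txt
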
@@ -500,7 +512,8 @@ def inject_presigned_url_rds(params, request_signer, model, **kwargs): return url = _get_cross_region_presigned_url( - request_signer, params, model, src, dest) + request_signer, params, model, src, dest + ) params['body']['PreSignedUrl'] = url @@ -522,11 +535,14 @@ def _decode_policy_types(parsed, shape): shape_name = 'policyDocumentType' if shape.type_name == 'structure': for member_name, member_shape in shape.members.items(): - if member_shape.type_name == 'string' and \ - member_shape.name == shape_name and \ - member_name in parsed: + if ( + member_shape.type_name == 'string' + and member_shape.name == shape_name + and member_name in parsed + ): parsed[member_name] = decode_quoted_jsondoc( - parsed[member_name]) + parsed[member_name] + ) elif member_name in parsed: _decode_policy_types(parsed[member_name], member_shape) if shape.type_name == 'list': @@ -544,9 +560,7 @@ def parse_get_bucket_location(parsed, http_response, **kwargs): if http_response.raw is None: return response_body = http_response.content - parser = ETree.XMLParser( - target=ETree.TreeBuilder(), - encoding='utf-8') + parser = ETree.XMLParser(target=ETree.TreeBuilder(), encoding='utf-8') parser.feed(response_body) root = parser.close() region = root.text @@ -555,17 +569,20 @@ def parse_get_bucket_location(parsed, http_response, **kwargs): def base64_encode_user_data(params, **kwargs): if 'UserData' in params: - if isinstance(params['UserData'], six.text_type): + if isinstance(params['UserData'], str): # Encode it to bytes if it is text. params['UserData'] = params['UserData'].encode('utf-8') - params['UserData'] = base64.b64encode( - params['UserData']).decode('utf-8') + params['UserData'] = base64.b64encode(params['UserData']).decode( + 'utf-8' + ) def document_base64_encoding(param): - description = ('**This value will be base64 encoded automatically. Do ' - 'not base64 encode this value prior to performing the ' - 'operation.**') + description = ( + '**This value will be base64 encoded automatically. Do ' + 'not base64 encode this value prior to performing the ' + 'operation.**' + ) append = AppendParamDocumentation(param, description) return append.append_documentation @@ -598,8 +615,7 @@ def validate_ascii_metadata(params, **kwargs): 'for key "%s", value: "%s". \nS3 metadata can only ' 'contain ASCII characters. 
' % (key, value) ) - raise ParamValidationError( - report=error_msg) + raise ParamValidationError(report=error_msg) def fix_route53_ids(params, model, **kwargs): @@ -613,8 +629,11 @@ def fix_route53_ids(params, model, **kwargs): if not input_shape or not hasattr(input_shape, 'members'): return - members = [name for (name, shape) in input_shape.members.items() - if shape.name in ['ResourceId', 'DelegationSetId']] + members = [ + name + for (name, shape) in input_shape.members.items() + if shape.name in ['ResourceId', 'DelegationSetId'] + ] for name in members: if name in params: @@ -635,7 +654,8 @@ def inject_account_id(params, **kwargs): def add_glacier_version(model, params, **kwargs): request_dict = params request_dict['headers']['x-amz-glacier-version'] = model.metadata[ - 'apiVersion'] + 'apiVersion' + ] def add_accept_header(model, params, **kwargs): @@ -659,7 +679,7 @@ def add_glacier_checksums(params, **kwargs): request_dict = params headers = request_dict['headers'] body = request_dict['body'] - if isinstance(body, six.binary_type): + if isinstance(body, bytes): # If the user provided a bytes type instead of a file # like object, we're temporarily create a BytesIO object # so we can use the util functions to calculate the @@ -669,7 +689,8 @@ def add_glacier_checksums(params, **kwargs): starting_position = body.tell() if 'x-amz-content-sha256' not in headers: headers['x-amz-content-sha256'] = utils.calculate_sha256( - body, as_hex=True) + body, as_hex=True + ) body.seek(starting_position) if 'x-amz-sha256-tree-hash' not in headers: headers['x-amz-sha256-tree-hash'] = utils.calculate_tree_hash(body) @@ -693,7 +714,9 @@ def document_glacier_tree_hash_checksum(): return AppendParamDocumentation('checksum', doc).append_documentation -def document_cloudformation_get_template_return_type(section, event_name, **kwargs): +def document_cloudformation_get_template_return_type( + section, event_name, **kwargs +): if 'response-params' in event_name: template_body_section = section.get_section('TemplateBody') type_section = template_body_section.get_section('param-type') @@ -713,6 +736,7 @@ def switch_host_machinelearning(request, **kwargs): def check_openssl_supports_tls_version_1_2(**kwargs): import ssl + try: openssl_version_tuple = ssl.OPENSSL_VERSION_INFO if openssl_version_tuple < (1, 0, 1): @@ -721,7 +745,7 @@ def check_openssl_supports_tls_version_1_2(**kwargs): 'support TLS 1.2, which is required for use of iot-data. ' 'Please use python installed with openssl version 1.0.1 or ' 'higher.' % (ssl.OPENSSL_VERSION), - UnsupportedTLSVersionWarning + UnsupportedTLSVersionWarning, ) # We cannot check the openssl version on python2.6, so we should just # pass on this conveniency check. 
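
# --- illustration, not part of the diff: the recurring rewrite this commit
# applies throughout handlers.py, hooks.py, loaders.py and model.py,
# condensed into one before/after pair. OldHoister/NewHoister are invented
# names; the "before" form assumes six is installed.
import six

class OldHoister(object):                     # py2-era: explicit object base
    def ensure_bytes(self, value):
        if isinstance(value, six.text_type):  # six alias for str
            value = value.encode('utf-8')
        return value

    def describe(self, header):
        return 'hoisting header %s' % header  # %-formatting

class NewHoister:                             # py3-only: implicit base class
    def ensure_bytes(self, value):
        if isinstance(value, str):            # six.text_type -> str
            value = value.encode('utf-8')
        return value

    def describe(self, header):
        return f'hoisting header {header}'    # %-formatting -> f-string

assert NewHoister().describe('x-amz-foo') == OldHoister().describe('x-amz-foo')
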
@@ -759,7 +783,7 @@ def decode_list_object(parsed, context, **kwargs): top_level_keys=['Delimiter', 'Marker', 'NextMarker'], nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')], parsed=parsed, - context=context + context=context, ) @@ -772,7 +796,7 @@ def decode_list_object_v2(parsed, context, **kwargs): top_level_keys=['Delimiter', 'Prefix', 'StartAfter'], nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')], parsed=parsed, - context=context + context=context, ) @@ -815,12 +839,14 @@ def decode_list_object_versions(parsed, context, **kwargs): ('CommonPrefixes', 'Prefix'), ], parsed=parsed, - context=context + context=context, ) def _decode_list_object(top_level_keys, nested_keys, parsed, context): - if parsed.get('EncodingType') == 'url' and context.get('encoding_type_auto_set'): + if parsed.get('EncodingType') == 'url' and context.get( + 'encoding_type_auto_set' + ): # URL decode top-level keys in the response if present. for key in top_level_keys: if key in parsed: @@ -834,9 +860,9 @@ def _decode_list_object(top_level_keys, nested_keys, parsed, context): def convert_body_to_file_like_object(params, **kwargs): if 'Body' in params: - if isinstance(params['Body'], six.string_types): + if isinstance(params['Body'], str): params['Body'] = six.BytesIO(ensure_bytes(params['Body'])) - elif isinstance(params['Body'], six.binary_type): + elif isinstance(params['Body'], bytes): params['Body'] = six.BytesIO(params['Body']) @@ -849,7 +875,7 @@ def _add_parameter_aliases(handler_list): aliases = { 'ec2.*.Filter': 'Filters', 'logs.CreateExportTask.from': 'fromTime', - 'cloudsearchdomain.Search.return': 'returnFields' + 'cloudsearchdomain.Search.return': 'returnFields', } for original, new_name in aliases.items(): @@ -863,16 +889,17 @@ def _add_parameter_aliases(handler_list): parameter_build_event_handler_tuple = ( 'before-parameter-build.' + event_portion, parameter_alias.alias_parameter_in_call, - REGISTER_FIRST + REGISTER_FIRST, ) docs_event_handler_tuple = ( 'docs.*.' + event_portion + '.complete-section', - parameter_alias.alias_parameter_in_documentation) + parameter_alias.alias_parameter_in_documentation, + ) handler_list.append(parameter_build_event_handler_tuple) handler_list.append(docs_event_handler_tuple) -class ParameterAlias(object): +class ParameterAlias: def __init__(self, original_name, alias_name): self._original_name = original_name self._alias_name = alias_name @@ -887,7 +914,7 @@ class ParameterAlias(object): raise AliasConflictParameterError( original=self._original_name, alias=self._alias_name, - operation=model.name + operation=model.name, ) # Remove the alias parameter value and use the old name # instead. @@ -916,14 +943,15 @@ class ParameterAlias(object): def _replace_content(self, section): content = section.getvalue().decode('utf-8') updated_content = content.replace( - self._original_name, self._alias_name) + self._original_name, self._alias_name + ) section.clear_text() section.write(updated_content) -class ClientMethodAlias(object): +class ClientMethodAlias: def __init__(self, actual_name): - """ Aliases a non-extant method to an existing method. + """Aliases a non-extant method to an existing method. :param actual_name: The name of the method that actually exists on the client. @@ -935,9 +963,9 @@ class ClientMethodAlias(object): # TODO: Remove this class as it is no longer used -class HeaderToHostHoister(object): - """Takes a header and moves it to the front of the hoststring. 
- """ +class HeaderToHostHoister: + """Takes a header and moves it to the front of the hoststring.""" + _VALID_HOSTNAME = re.compile(r'(?!-)[a-z\d-]{1,63}(?<!-)$', re.IGNORECASE) def __init__(self, header_name): @@ -961,10 +989,12 @@ class HeaderToHostHoister(object): def _ensure_header_is_valid_host(self, header): match = self._VALID_HOSTNAME.match(header) if not match: - raise ParamValidationError(report=( - 'Hostnames must contain only - and alphanumeric characters, ' - 'and between 1 and 63 characters long.' - )) + raise ParamValidationError( + report=( + 'Hostnames must contain only - and alphanumeric characters, ' + 'and between 1 and 63 characters long.' + ) + ) def _prepend_to_host(self, url, prefix): url_components = urlsplit(url) @@ -976,7 +1006,7 @@ class HeaderToHostHoister(object): new_netloc, url_components.path, url_components.query, - '' + '', ) new_url = urlunsplit(new_components) return new_url @@ -1015,43 +1045,52 @@ def add_retry_headers(request, **kwargs): BUILTIN_HANDLERS = [ ('choose-service-name', handle_service_name_alias), - ('getattr.mturk.list_hi_ts_for_qualification_type', - ClientMethodAlias('list_hits_for_qualification_type')), - ('before-parameter-build.s3.UploadPart', - convert_body_to_file_like_object, REGISTER_LAST), - ('before-parameter-build.s3.PutObject', - convert_body_to_file_like_object, REGISTER_LAST), + ( + 'getattr.mturk.list_hi_ts_for_qualification_type', + ClientMethodAlias('list_hits_for_qualification_type'), + ), + ( + 'before-parameter-build.s3.UploadPart', + convert_body_to_file_like_object, + REGISTER_LAST, + ), + ( + 'before-parameter-build.s3.PutObject', + convert_body_to_file_like_object, + REGISTER_LAST, + ), ('creating-client-class', add_generate_presigned_url), ('creating-client-class.s3', add_generate_presigned_post), ('creating-client-class.iot-data', check_openssl_supports_tls_version_1_2), ('creating-client-class.lex-runtime-v2', remove_lex_v2_start_conversation), ('after-call.iam', json_decode_policies), - ('after-call.ec2.GetConsoleOutput', decode_console_output), ('after-call.cloudformation.GetTemplate', json_decode_template_body), ('after-call.s3.GetBucketLocation', parse_get_bucket_location), - ('before-parameter-build', generate_idempotent_uuid), - ('before-parameter-build.s3', validate_bucket_name), - - ('before-parameter-build.s3.ListObjects', - set_list_objects_encoding_type_url), - ('before-parameter-build.s3.ListObjectsV2', - set_list_objects_encoding_type_url), - ('before-parameter-build.s3.ListObjectVersions', - set_list_objects_encoding_type_url), - ('before-parameter-build.s3.CopyObject', - handle_copy_source_param), - ('before-parameter-build.s3.UploadPartCopy', - handle_copy_source_param), + ( + 'before-parameter-build.s3.ListObjects', + set_list_objects_encoding_type_url, + ), + ( + 'before-parameter-build.s3.ListObjectsV2', + set_list_objects_encoding_type_url, + ), + ( + 'before-parameter-build.s3.ListObjectVersions', + set_list_objects_encoding_type_url, + ), + ('before-parameter-build.s3.CopyObject', handle_copy_source_param), + ('before-parameter-build.s3.UploadPartCopy', handle_copy_source_param), ('before-parameter-build.s3.CopyObject', validate_ascii_metadata), ('before-parameter-build.s3.PutObject', validate_ascii_metadata), - ('before-parameter-build.s3.CreateMultipartUpload', - validate_ascii_metadata), + ( + 'before-parameter-build.s3.CreateMultipartUpload', + validate_ascii_metadata, + ), ('docs.*.s3.CopyObject.complete-section', document_copy_source_form), 
('docs.*.s3.UploadPartCopy.complete-section', document_copy_source_form), - ('before-call', add_recursion_detection_header), ('before-call.s3', add_expect_header), ('before-call.glacier', add_glacier_version), @@ -1067,13 +1106,18 @@ BUILTIN_HANDLERS = [ ('request-created.machinelearning.Predict', switch_host_machinelearning), ('needs-retry.s3.UploadPartCopy', check_for_200_error, REGISTER_FIRST), ('needs-retry.s3.CopyObject', check_for_200_error, REGISTER_FIRST), - ('needs-retry.s3.CompleteMultipartUpload', check_for_200_error, - REGISTER_FIRST), + ( + 'needs-retry.s3.CompleteMultipartUpload', + check_for_200_error, + REGISTER_FIRST, + ), ('choose-signer.cognito-identity.GetId', disable_signing), ('choose-signer.cognito-identity.GetOpenIdToken', disable_signing), ('choose-signer.cognito-identity.UnlinkIdentity', disable_signing), - ('choose-signer.cognito-identity.GetCredentialsForIdentity', - disable_signing), + ( + 'choose-signer.cognito-identity.GetCredentialsForIdentity', + disable_signing, + ), ('choose-signer.sts.AssumeRoleWithSAML', disable_signing), ('choose-signer.sts.AssumeRoleWithWebIdentity', disable_signing), ('choose-signer', set_operation_specific_signer), @@ -1087,125 +1131,172 @@ BUILTIN_HANDLERS = [ ('before-parameter-build.s3.UploadPartCopy', sse_md5), ('before-parameter-build.s3.UploadPartCopy', copy_source_sse_md5), ('before-parameter-build.ec2.RunInstances', base64_encode_user_data), - ('before-parameter-build.autoscaling.CreateLaunchConfiguration', - base64_encode_user_data), + ( + 'before-parameter-build.autoscaling.CreateLaunchConfiguration', + base64_encode_user_data, + ), ('before-parameter-build.route53', fix_route53_ids), ('before-parameter-build.glacier', inject_account_id), ('after-call.s3.ListObjects', decode_list_object), ('after-call.s3.ListObjectsV2', decode_list_object_v2), ('after-call.s3.ListObjectsV1Ext', decode_list_object_v1ext), ('after-call.s3.ListObjectVersions', decode_list_object_versions), - # Cloudsearchdomain search operation will be sent by HTTP POST - ('request-created.cloudsearchdomain.Search', - change_get_to_post), + ('request-created.cloudsearchdomain.Search', change_get_to_post), # Glacier documentation customizations - ('docs.*.glacier.*.complete-section', - AutoPopulatedParam('accountId', 'Note: this parameter is set to "-" by' - 'default if no value is not specified.') - .document_auto_populated_param), - ('docs.*.glacier.UploadArchive.complete-section', - AutoPopulatedParam('checksum').document_auto_populated_param), - ('docs.*.glacier.UploadMultipartPart.complete-section', - AutoPopulatedParam('checksum').document_auto_populated_param), - ('docs.request-params.glacier.CompleteMultipartUpload.complete-section', - document_glacier_tree_hash_checksum()), + ( + 'docs.*.glacier.*.complete-section', + AutoPopulatedParam( + 'accountId', + 'Note: this parameter is set to "-" by' + 'default if no value is not specified.', + ).document_auto_populated_param, + ), + ( + 'docs.*.glacier.UploadArchive.complete-section', + AutoPopulatedParam('checksum').document_auto_populated_param, + ), + ( + 'docs.*.glacier.UploadMultipartPart.complete-section', + AutoPopulatedParam('checksum').document_auto_populated_param, + ), + ( + 'docs.request-params.glacier.CompleteMultipartUpload.complete-section', + document_glacier_tree_hash_checksum(), + ), # Cloudformation documentation customizations - ('docs.*.cloudformation.GetTemplate.complete-section', - document_cloudformation_get_template_return_type), - + ( + 
'docs.*.cloudformation.GetTemplate.complete-section', + document_cloudformation_get_template_return_type, + ), # UserData base64 encoding documentation customizations - ('docs.*.ec2.RunInstances.complete-section', - document_base64_encoding('UserData')), - ('docs.*.autoscaling.CreateLaunchConfiguration.complete-section', - document_base64_encoding('UserData')), - + ( + 'docs.*.ec2.RunInstances.complete-section', + document_base64_encoding('UserData'), + ), + ( + 'docs.*.autoscaling.CreateLaunchConfiguration.complete-section', + document_base64_encoding('UserData'), + ), # EC2 CopySnapshot documentation customizations - ('docs.*.ec2.CopySnapshot.complete-section', - AutoPopulatedParam('PresignedUrl').document_auto_populated_param), - ('docs.*.ec2.CopySnapshot.complete-section', - AutoPopulatedParam('DestinationRegion').document_auto_populated_param), + ( + 'docs.*.ec2.CopySnapshot.complete-section', + AutoPopulatedParam('PresignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.ec2.CopySnapshot.complete-section', + AutoPopulatedParam('DestinationRegion').document_auto_populated_param, + ), # S3 SSE documentation modifications - ('docs.*.s3.*.complete-section', - AutoPopulatedParam('SSECustomerKeyMD5').document_auto_populated_param), + ( + 'docs.*.s3.*.complete-section', + AutoPopulatedParam('SSECustomerKeyMD5').document_auto_populated_param, + ), # S3 SSE Copy Source documentation modifications - ('docs.*.s3.*.complete-section', - AutoPopulatedParam('CopySourceSSECustomerKeyMD5').document_auto_populated_param), + ( + 'docs.*.s3.*.complete-section', + AutoPopulatedParam( + 'CopySourceSSECustomerKeyMD5' + ).document_auto_populated_param, + ), # Add base64 information to Lambda - ('docs.*.lambda.UpdateFunctionCode.complete-section', - document_base64_encoding('ZipFile')), + ( + 'docs.*.lambda.UpdateFunctionCode.complete-section', + document_base64_encoding('ZipFile'), + ), # The following S3 operations cannot actually accept a ContentMD5 - ('docs.*.s3.*.complete-section', - HideParamFromOperations( - 's3', 'ContentMD5', - ['DeleteObjects', 'PutBucketAcl', 'PutBucketCors', - 'PutBucketLifecycle', 'PutBucketLogging', 'PutBucketNotification', - 'PutBucketPolicy', 'PutBucketReplication', 'PutBucketRequestPayment', - 'PutBucketTagging', 'PutBucketVersioning', 'PutBucketWebsite', - 'PutObjectAcl']).hide_param), - + ( + 'docs.*.s3.*.complete-section', + HideParamFromOperations( + 's3', + 'ContentMD5', + [ + 'DeleteObjects', + 'PutBucketAcl', + 'PutBucketCors', + 'PutBucketLifecycle', + 'PutBucketLogging', + 'PutBucketNotification', + 'PutBucketPolicy', + 'PutBucketReplication', + 'PutBucketRequestPayment', + 'PutBucketTagging', + 'PutBucketVersioning', + 'PutBucketWebsite', + 'PutObjectAcl', + ], + ).hide_param, + ), ############# # RDS ############# ('creating-client-class.rds', add_generate_db_auth_token), - - ('before-call.rds.CopyDBClusterSnapshot', - inject_presigned_url_rds), - ('before-call.rds.CreateDBCluster', - inject_presigned_url_rds), - ('before-call.rds.CopyDBSnapshot', - inject_presigned_url_rds), - ('before-call.rds.CreateDBInstanceReadReplica', - inject_presigned_url_rds), - ('before-call.rds.StartDBInstanceAutomatedBackupsReplication', - inject_presigned_url_rds), - + ('before-call.rds.CopyDBClusterSnapshot', inject_presigned_url_rds), + ('before-call.rds.CreateDBCluster', inject_presigned_url_rds), + ('before-call.rds.CopyDBSnapshot', inject_presigned_url_rds), + ('before-call.rds.CreateDBInstanceReadReplica', inject_presigned_url_rds), + ( + 
'before-call.rds.StartDBInstanceAutomatedBackupsReplication', + inject_presigned_url_rds, + ), # RDS PresignedUrl documentation customizations - ('docs.*.rds.CopyDBClusterSnapshot.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - ('docs.*.rds.CreateDBCluster.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - ('docs.*.rds.CopyDBSnapshot.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - ('docs.*.rds.CreateDBInstanceReadReplica.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - ('docs.*.rds.StartDBInstanceAutomatedBackupsReplication.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - + ( + 'docs.*.rds.CopyDBClusterSnapshot.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.rds.CreateDBCluster.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.rds.CopyDBSnapshot.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.rds.CreateDBInstanceReadReplica.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.rds.StartDBInstanceAutomatedBackupsReplication.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), ############# # Neptune ############# - ('before-call.neptune.CopyDBClusterSnapshot', - inject_presigned_url_rds), - ('before-call.neptune.CreateDBCluster', - inject_presigned_url_rds), - + ('before-call.neptune.CopyDBClusterSnapshot', inject_presigned_url_rds), + ('before-call.neptune.CreateDBCluster', inject_presigned_url_rds), # Neptune PresignedUrl documentation customizations - ('docs.*.neptune.CopyDBClusterSnapshot.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - ('docs.*.neptune.CreateDBCluster.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - + ( + 'docs.*.neptune.CopyDBClusterSnapshot.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.neptune.CreateDBCluster.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), ############# # DocDB ############# - ('before-call.docdb.CopyDBClusterSnapshot', - inject_presigned_url_rds), - ('before-call.docdb.CreateDBCluster', - inject_presigned_url_rds), - + ('before-call.docdb.CopyDBClusterSnapshot', inject_presigned_url_rds), + ('before-call.docdb.CreateDBCluster', inject_presigned_url_rds), # DocDB PresignedUrl documentation customizations - ('docs.*.docdb.CopyDBClusterSnapshot.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - ('docs.*.docdb.CreateDBCluster.complete-section', - AutoPopulatedParam('PreSignedUrl').document_auto_populated_param), - + ( + 'docs.*.docdb.CopyDBClusterSnapshot.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), + ( + 'docs.*.docdb.CreateDBCluster.complete-section', + AutoPopulatedParam('PreSignedUrl').document_auto_populated_param, + ), ########### # SMS Voice ########### - ('docs.title.sms-voice', - DeprecatedServiceDocumenter('pinpoint-sms-voice').inject_deprecation_notice), + ( + 'docs.title.sms-voice', + DeprecatedServiceDocumenter( + 'pinpoint-sms-voice' + ).inject_deprecation_notice, + ), ('before-call', 
inject_api_version_header_if_needed), - ] _add_parameter_aliases(BUILTIN_HANDLERS) diff --git a/contrib/python/botocore/py3/botocore/history.py b/contrib/python/botocore/py3/botocore/history.py index 080b381cbb..59d9481d7f 100644 --- a/contrib/python/botocore/py3/botocore/history.py +++ b/contrib/python/botocore/py3/botocore/history.py @@ -16,12 +16,12 @@ HISTORY_RECORDER = None logger = logging.getLogger(__name__) -class BaseHistoryHandler(object): +class BaseHistoryHandler: def emit(self, event_type, payload, source): raise NotImplementedError('emit()') -class HistoryRecorder(object): +class HistoryRecorder: def __init__(self): self._enabled = False self._handlers = [] @@ -43,8 +43,9 @@ class HistoryRecorder(object): except Exception: # Never let the process die because we had a failure in # a record collection handler. - logger.debug("Exception raised in %s.", handler, - exc_info=True) + logger.debug( + "Exception raised in %s.", handler, exc_info=True + ) def get_global_history_recorder(): diff --git a/contrib/python/botocore/py3/botocore/hooks.py b/contrib/python/botocore/py3/botocore/hooks.py index 5f88eccbae..fdf60fba60 100644 --- a/contrib/python/botocore/py3/botocore/hooks.py +++ b/contrib/python/botocore/py3/botocore/hooks.py @@ -14,7 +14,7 @@ import copy import logging from collections import deque, namedtuple -from botocore.compat import accepts_kwargs, six +from botocore.compat import accepts_kwargs from botocore.utils import EVENT_ALIASES logger = logging.getLogger(__name__) @@ -27,7 +27,6 @@ _LAST = 2 class NodeList(_NodeList): - def __copy__(self): first_copy = copy.copy(self.first) middle_copy = copy.copy(self.middle) @@ -64,7 +63,7 @@ def first_non_none_response(responses, default=None): return default -class BaseEventHooks(object): +class BaseEventHooks: def emit(self, event_name, **kwargs): """Call all handlers subscribed to an event. @@ -82,8 +81,9 @@ class BaseEventHooks(object): """ return [] - def register(self, event_name, handler, unique_id=None, - unique_id_uses_count=False): + def register( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): """Register an event handler for a given event. If a ``unique_id`` is given, the handler will not be registered @@ -97,12 +97,17 @@ class BaseEventHooks(object): with ``register_last()``. """ - self._verify_and_register(event_name, handler, unique_id, - register_method=self._register, - unique_id_uses_count=unique_id_uses_count) + self._verify_and_register( + event_name, + handler, + unique_id, + register_method=self._register, + unique_id_uses_count=unique_id_uses_count, + ) - def register_first(self, event_name, handler, unique_id=None, - unique_id_uses_count=False): + def register_first( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): """Register an event handler to be called first for an event. All event handlers registered with ``register_first()`` will @@ -110,30 +115,50 @@ class BaseEventHooks(object): ``register_last()``. 
""" - self._verify_and_register(event_name, handler, unique_id, - register_method=self._register_first, - unique_id_uses_count=unique_id_uses_count) + self._verify_and_register( + event_name, + handler, + unique_id, + register_method=self._register_first, + unique_id_uses_count=unique_id_uses_count, + ) - def register_last(self, event_name, handler, unique_id=None, - unique_id_uses_count=False): + def register_last( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): """Register an event handler to be called last for an event. All event handlers registered with ``register_last()`` will be called after handlers registered with ``register_first()`` and ``register()``. """ - self._verify_and_register(event_name, handler, unique_id, - register_method=self._register_last, - unique_id_uses_count=unique_id_uses_count) + self._verify_and_register( + event_name, + handler, + unique_id, + register_method=self._register_last, + unique_id_uses_count=unique_id_uses_count, + ) - def _verify_and_register(self, event_name, handler, unique_id, - register_method, unique_id_uses_count): + def _verify_and_register( + self, + event_name, + handler, + unique_id, + register_method, + unique_id_uses_count, + ): self._verify_is_callable(handler) self._verify_accept_kwargs(handler) register_method(event_name, handler, unique_id, unique_id_uses_count) - def unregister(self, event_name, handler=None, unique_id=None, - unique_id_uses_count=False): + def unregister( + self, + event_name, + handler=None, + unique_id=None, + unique_id_uses_count=False, + ): """Unregister an event handler for a given event. If no ``unique_id`` was given during registration, then the @@ -144,7 +169,7 @@ class BaseEventHooks(object): pass def _verify_is_callable(self, func): - if not six.callable(func): + if not callable(func): raise ValueError("Event handler %s must be callable." 
% func) def _verify_accept_kwargs(self, func): @@ -158,8 +183,10 @@ class BaseEventHooks(object): """ try: if not accepts_kwargs(func): - raise ValueError("Event handler %s must accept keyword " - "arguments (**kwargs)" % func) + raise ValueError( + f"Event handler {func} must accept keyword " + f"arguments (**kwargs)" + ) except TypeError: return False @@ -247,23 +274,38 @@ class HierarchicalEmitter(BaseEventHooks): else: return (None, None) - def _register(self, event_name, handler, unique_id=None, - unique_id_uses_count=False): - self._register_section(event_name, handler, unique_id, - unique_id_uses_count, section=_MIDDLE) + def _register( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): + self._register_section( + event_name, + handler, + unique_id, + unique_id_uses_count, + section=_MIDDLE, + ) - def _register_first(self, event_name, handler, unique_id=None, - unique_id_uses_count=False): - self._register_section(event_name, handler, unique_id, - unique_id_uses_count, section=_FIRST) + def _register_first( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): + self._register_section( + event_name, + handler, + unique_id, + unique_id_uses_count, + section=_FIRST, + ) - def _register_last(self, event_name, handler, unique_id, - unique_id_uses_count=False): - self._register_section(event_name, handler, unique_id, - unique_id_uses_count, section=_LAST) + def _register_last( + self, event_name, handler, unique_id, unique_id_uses_count=False + ): + self._register_section( + event_name, handler, unique_id, unique_id_uses_count, section=_LAST + ) - def _register_section(self, event_name, handler, unique_id, - unique_id_uses_count, section): + def _register_section( + self, event_name, handler, unique_id, unique_id_uses_count, section + ): if unique_id is not None: if unique_id in self._unique_id_handlers: # We've already registered a handler using this unique_id @@ -275,7 +317,8 @@ class HierarchicalEmitter(BaseEventHooks): "Initial registration of unique id %s was " "specified to use a counter. Subsequent register " "calls to unique id must specify use of a counter " - "as well." % unique_id) + "as well." % unique_id + ) else: self._unique_id_handlers[unique_id]['count'] += 1 else: @@ -284,14 +327,16 @@ class HierarchicalEmitter(BaseEventHooks): "Initial registration of unique id %s was " "specified to not use a counter. Subsequent " "register calls to unique id must specify not to " - "use a counter as well." % unique_id) + "use a counter as well." % unique_id + ) return else: # Note that the trie knows nothing about the unique # id. We track uniqueness in this class via the # _unique_id_handlers. - self._handlers.append_item(event_name, handler, - section=section) + self._handlers.append_item( + event_name, handler, section=section + ) unique_id_handler_item = {'handler': handler} if unique_id_uses_count: unique_id_handler_item['count'] = 1 @@ -302,8 +347,13 @@ class HierarchicalEmitter(BaseEventHooks): # clear the cache. This has the opportunity for smarter invalidations. 
self._lookup_cache = {} - def unregister(self, event_name, handler=None, unique_id=None, - unique_id_uses_count=False): + def unregister( + self, + event_name, + handler=None, + unique_id=None, + unique_id_uses_count=False, + ): if unique_id is not None: try: count = self._unique_id_handlers[unique_id].get('count', None) @@ -316,9 +366,12 @@ class HierarchicalEmitter(BaseEventHooks): raise ValueError( "Initial registration of unique id %s was specified to " "use a counter. Subsequent unregister calls to unique " - "id must specify use of a counter as well." % unique_id) + "id must specify use of a counter as well." % unique_id + ) elif count == 1: - handler = self._unique_id_handlers.pop(unique_id)['handler'] + handler = self._unique_id_handlers.pop(unique_id)[ + 'handler' + ] else: self._unique_id_handlers[unique_id]['count'] -= 1 return @@ -328,7 +381,8 @@ class HierarchicalEmitter(BaseEventHooks): "Initial registration of unique id %s was specified " "to not use a counter. Subsequent unregister calls " "to unique id must specify not to use a counter as " - "well." % unique_id) + "well." % unique_id + ) handler = self._unique_id_handlers.pop(unique_id)['handler'] try: self._handlers.remove_item(event_name, handler) @@ -361,29 +415,37 @@ class EventAliaser(BaseEventHooks): aliased_event_name = self._alias_event_name(event_name) return self._emitter.emit_until_response(aliased_event_name, **kwargs) - def register(self, event_name, handler, unique_id=None, - unique_id_uses_count=False): + def register( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): aliased_event_name = self._alias_event_name(event_name) return self._emitter.register( aliased_event_name, handler, unique_id, unique_id_uses_count ) - def register_first(self, event_name, handler, unique_id=None, - unique_id_uses_count=False): + def register_first( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): aliased_event_name = self._alias_event_name(event_name) return self._emitter.register_first( aliased_event_name, handler, unique_id, unique_id_uses_count ) - def register_last(self, event_name, handler, unique_id=None, - unique_id_uses_count=False): + def register_last( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): aliased_event_name = self._alias_event_name(event_name) return self._emitter.register_last( aliased_event_name, handler, unique_id, unique_id_uses_count ) - def unregister(self, event_name, handler=None, unique_id=None, - unique_id_uses_count=False): + def unregister( + self, + event_name, + handler=None, + unique_id=None, + unique_id_uses_count=False, + ): aliased_event_name = self._alias_event_name(event_name) return self._emitter.unregister( aliased_event_name, handler, unique_id, unique_id_uses_count @@ -418,9 +480,9 @@ class EventAliaser(BaseEventHooks): continue new_name = '.'.join(event_parts) - logger.debug("Changing event name from %s to %s" % ( - event_name, new_name - )) + logger.debug( + f"Changing event name from {event_name} to {new_name}" + ) self._alias_name_cache[event_name] = new_name return new_name @@ -430,20 +492,19 @@ class EventAliaser(BaseEventHooks): def _replace_subsection(self, sections, old_parts, new_part): for i in range(len(sections)): if ( - sections[i] == old_parts[0] and - sections[i:i + len(old_parts)] == old_parts + sections[i] == old_parts[0] + and sections[i : i + len(old_parts)] == old_parts ): - sections[i:i + len(old_parts)] = [new_part] + sections[i : i + len(old_parts)] = [new_part] return def 
__copy__(self): return self.__class__( - copy.copy(self._emitter), - copy.copy(self._event_aliases) + copy.copy(self._emitter), copy.copy(self._event_aliases) ) -class _PrefixTrie(object): +class _PrefixTrie: """Specialized prefix trie that handles wildcards. The prefixes in this case are based on dot separated @@ -465,6 +526,7 @@ class _PrefixTrie(object): most specific to least specific. """ + def __init__(self): # Each dictionary can be though of as a node, where a node # has values associated with the node, and children is a link @@ -572,8 +634,7 @@ class _PrefixTrie(object): # where a key does not exist. del current_node['children'][key_parts[index]] else: - raise ValueError( - "key is not in trie: %s" % '.'.join(key_parts)) + raise ValueError(f"key is not in trie: {'.'.join(key_parts)}") def __copy__(self): # The fact that we're using a nested dict under the covers diff --git a/contrib/python/botocore/py3/botocore/httpchecksum.py b/contrib/python/botocore/py3/botocore/httpchecksum.py index 3c7970e1b2..7a6c4ac2a8 100644 --- a/contrib/python/botocore/py3/botocore/httpchecksum.py +++ b/contrib/python/botocore/py3/botocore/httpchecksum.py @@ -215,7 +215,7 @@ class StreamingChecksumBody(StreamingBody): self._expected = expected def read(self, amt=None): - chunk = super(StreamingChecksumBody, self).read(amt=amt) + chunk = super().read(amt=amt) self._checksum.update(chunk) if amt is None or (not chunk and amt > 0): self._validate_checksum() @@ -224,8 +224,8 @@ class StreamingChecksumBody(StreamingBody): def _validate_checksum(self): if self._checksum.digest() != base64.b64decode(self._expected): error_msg = ( - "Expected checksum %s did not match calculated checksum: %s" - % (self._expected, self._checksum.b64digest(),) + f"Expected checksum {self._expected} did not match calculated " + f"checksum: {self._checksum.b64digest()}" ) raise FlexibleChecksumError(error_msg=error_msg) @@ -236,7 +236,10 @@ def resolve_checksum_context(request, operation_model, params): def resolve_request_checksum_algorithm( - request, operation_model, params, supported_algorithms=None, + request, + operation_model, + params, + supported_algorithms=None, ): http_checksum = operation_model.http_checksum algorithm_member = http_checksum.get("requestAlgorithmMember") @@ -341,7 +344,9 @@ def _apply_request_trailer_checksum(request): body = io.BytesIO(body) request["body"] = AwsChunkedWrapper( - body, checksum_cls=checksum_cls, checksum_name=location_name, + body, + checksum_cls=checksum_cls, + checksum_name=location_name, ) @@ -353,9 +358,9 @@ def resolve_response_checksum_algorithms( if mode_member and mode_member in params: if supported_algorithms is None: supported_algorithms = _SUPPORTED_CHECKSUM_ALGORITHMS - response_algorithms = set( + response_algorithms = { a.lower() for a in http_checksum.get("responseAlgorithms", []) - ) + } usable_algorithms = [] for algorithm in _ALGORITHMS_PRIORITY_LIST: @@ -431,7 +436,10 @@ def _handle_bytes_response(http_response, response, algorithm): if checksum.digest() != base64.b64decode(expected): error_msg = ( "Expected checksum %s did not match calculated checksum: %s" - % (expected, checksum.b64digest(),) + % ( + expected, + checksum.b64digest(), + ) ) raise FlexibleChecksumError(error_msg=error_msg) return body diff --git a/contrib/python/botocore/py3/botocore/httpsession.py b/contrib/python/botocore/py3/botocore/httpsession.py index 20e59632bd..550c299311 100644 --- a/contrib/python/botocore/py3/botocore/httpsession.py +++ b/contrib/python/botocore/py3/botocore/httpsession.py 
@@ -70,6 +70,7 @@ DEFAULT_CA_BUNDLE = os.path.join(os.path.dirname(__file__), 'cacert.pem') try: from certifi import where except ImportError: + def where(): return DEFAULT_CA_BUNDLE @@ -79,17 +80,18 @@ def get_cert_path(verify): return verify cert_path = where() - logger.debug("Certificate path: {0}".format(cert_path)) + logger.debug(f"Certificate path: {cert_path}") return cert_path -def create_urllib3_context(ssl_version=None, cert_reqs=None, - options=None, ciphers=None): - """ This function is a vendored version of the same function in urllib3 +def create_urllib3_context( + ssl_version=None, cert_reqs=None, options=None, ciphers=None +): + """This function is a vendored version of the same function in urllib3 - We vendor this function to ensure that the SSL contexts we construct - always use the std lib SSLContext instead of pyopenssl. + We vendor this function to ensure that the SSL contexts we construct + always use the std lib SSLContext instead of pyopenssl. """ # PROTOCOL_TLS is deprecated in Python 3.10 if not ssl_version or ssl_version == PROTOCOL_TLS: @@ -125,9 +127,9 @@ def create_urllib3_context(ssl_version=None, cert_reqs=None, # versions of Python. We only enable on Python 3.7.4+ or if certificate # verification is enabled to work around Python issue #37428 # See: https://bugs.python.org/issue37428 - if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr( - context, "post_handshake_auth", None - ) is not None: + if ( + cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4) + ) and getattr(context, "post_handshake_auth", None) is not None: context.post_handshake_auth = True def disable_check_hostname(): @@ -194,13 +196,14 @@ def _is_ipaddress(host): return is_ipaddress(host) or bool(IPV6_ADDRZ_RE.match(host)) -class ProxyConfiguration(object): +class ProxyConfiguration: """Represents a proxy configuration dictionary and additional settings. This class represents a proxy configuration dictionary and provides utility functions to retreive well structured proxy urls and proxy headers from the proxy configuration dictionary. """ + def __init__(self, proxies=None, proxies_settings=None): if proxies is None: proxies = {} @@ -211,7 +214,7 @@ class ProxyConfiguration(object): self._proxies_settings = proxies_settings def proxy_url_for(self, url): - """Retrieves the corresponding proxy url for a given url. """ + """Retrieves the corresponding proxy url for a given url.""" parsed_url = urlparse(url) proxy = self._proxies.get(parsed_url.scheme) if proxy: @@ -219,7 +222,7 @@ class ProxyConfiguration(object): return proxy def proxy_headers_for(self, proxy_url): - """Retrieves the corresponding proxy headers for a given proxy url. """ + """Retrieves the corresponding proxy headers for a given proxy url.""" headers = {} username, password = self._get_auth_from_url(proxy_url) if username and password: @@ -240,9 +243,9 @@ class ProxyConfiguration(object): return 'http://' + proxy_url def _construct_basic_auth(self, username, password): - auth_str = '{0}:{1}'.format(username, password) + auth_str = f'{username}:{password}' encoded_str = b64encode(auth_str.encode('ascii')).strip().decode() - return 'Basic {0}'.format(encoded_str) + return f'Basic {encoded_str}' def _get_auth_from_url(self, url): parsed_url = urlparse(url) @@ -252,7 +255,7 @@ class ProxyConfiguration(object): return None, None -class URLLib3Session(object): +class URLLib3Session: """A basic HTTP client that supports connection pooling and proxies. 
This class is inspired by requests.adapters.HTTPAdapter, but has been @@ -263,6 +266,7 @@ class URLLib3Session(object): v2.7.0 implemented this themselves, later version urllib3 support this directly via a flag to urlopen so enabling it if needed should be trivial. """ + def __init__( self, verify=True, @@ -308,7 +312,7 @@ class URLLib3Session(object): 'use_forwarding_for_https': proxies_settings.get( 'proxy_use_forwarding_for_https' ), - **kwargs + **kwargs, } return {k: v for k, v in proxies_kwargs.items() if v is not None} @@ -384,7 +388,7 @@ class URLLib3Session(object): context.load_cert_chain(proxy_cert) return context - except (IOError, URLLib3SSLError, LocationParseError) as e: + except (OSError, URLLib3SSLError, LocationParseError) as e: raise InvalidProxiesConfigError(error=e) def _get_connection_manager(self, url, proxy_url=None): @@ -405,8 +409,8 @@ class URLLib3Session(object): # forwarding for HTTPS through the 'use_forwarding_for_https' parameter. proxy_scheme = urlparse(proxy_url).scheme using_https_forwarding_proxy = ( - proxy_scheme == 'https' and - self._proxies_kwargs().get('use_forwarding_for_https', False) + proxy_scheme == 'https' + and self._proxies_kwargs().get('use_forwarding_for_https', False) ) if using_https_forwarding_proxy or url.startswith('http:'): @@ -467,16 +471,16 @@ class URLLib3Session(object): except (NewConnectionError, socket.gaierror) as e: raise EndpointConnectionError(endpoint_url=request.url, error=e) except ProxyError as e: - raise ProxyConnectionError(proxy_url=mask_proxy_url(proxy_url), error=e) + raise ProxyConnectionError( + proxy_url=mask_proxy_url(proxy_url), error=e + ) except URLLib3ConnectTimeoutError as e: raise ConnectTimeoutError(endpoint_url=request.url, error=e) except URLLib3ReadTimeoutError as e: raise ReadTimeoutError(endpoint_url=request.url, error=e) except ProtocolError as e: raise ConnectionClosedError( - error=e, - request=request, - endpoint_url=request.url + error=e, request=request, endpoint_url=request.url ) except Exception as e: message = 'Exception received when sending urllib3 HTTP request' diff --git a/contrib/python/botocore/py3/botocore/loaders.py b/contrib/python/botocore/py3/botocore/loaders.py index 566f29650a..698aa725e7 100644 --- a/contrib/python/botocore/py3/botocore/loaders.py +++ b/contrib/python/botocore/py3/botocore/loaders.py @@ -124,6 +124,7 @@ def instance_cache(func): ``self._cache`` dictionary. """ + def _wrapper(self, *args, **kwargs): key = (func.__name__,) + args for pair in sorted(kwargs.items()): @@ -133,15 +134,17 @@ def instance_cache(func): data = func(self, *args, **kwargs) self._cache[key] = data return data + return _wrapper -class JSONFileLoader(object): +class JSONFileLoader: """Loader JSON files. This class can load the default format of models, which is a JSON file. """ + def exists(self, file_path): """Checks if the file exists. @@ -246,7 +249,7 @@ def create_loader(search_path_string=None): return Loader(extra_search_paths=paths) -class Loader(object): +class Loader: """Find and load data models. This class will handle searching for and loading data models. @@ -255,17 +258,24 @@ class Loader(object): convenience method over ``load_data`` and ``determine_latest_version``. """ + FILE_LOADER_CLASS = HybridJsonLoader # The included models in botocore/data/ that we ship with botocore. BUILTIN_DATA_PATH = os.path.join(BOTOCORE_ROOT, 'data') # For convenience we automatically add ~/.aws/models to the data path. 
- CUSTOMER_DATA_PATH = os.path.join(os.path.expanduser('~'), - '.aws', 'models') + CUSTOMER_DATA_PATH = os.path.join( + os.path.expanduser('~'), '.aws', 'models' + ) BUILTIN_EXTRAS_TYPES = ['sdk'] - def __init__(self, extra_search_paths=None, file_loader=None, - cache=None, include_default_search_paths=True, - include_default_extras=True): + def __init__( + self, + extra_search_paths=None, + file_loader=None, + cache=None, + include_default_search_paths=True, + include_default_extras=True, + ): self._cache = {} if file_loader is None: file_loader = self.FILE_LOADER_CLASS() @@ -275,8 +285,9 @@ class Loader(object): else: self._search_paths = [] if include_default_search_paths: - self._search_paths.extend([self.CUSTOMER_DATA_PATH, - self.BUILTIN_DATA_PATH]) + self._search_paths.extend( + [self.CUSTOMER_DATA_PATH, self.BUILTIN_DATA_PATH] + ) self._extras_types = [] if include_default_extras: @@ -319,15 +330,17 @@ class Loader(object): # by searching for the corresponding type_name in each # potential directory. possible_services = [ - d for d in os.listdir(possible_path) - if os.path.isdir(os.path.join(possible_path, d))] + d + for d in os.listdir(possible_path) + if os.path.isdir(os.path.join(possible_path, d)) + ] for service_name in possible_services: full_dirname = os.path.join(possible_path, service_name) api_versions = os.listdir(full_dirname) for api_version in api_versions: - full_load_path = os.path.join(full_dirname, - api_version, - type_name) + full_load_path = os.path.join( + full_dirname, api_version, type_name + ) if self.file_loader.exists(full_load_path): services.add(service_name) break @@ -377,9 +390,9 @@ class Loader(object): """ known_api_versions = set() - for possible_path in self._potential_locations(service_name, - must_exist=True, - is_dir=True): + for possible_path in self._potential_locations( + service_name, must_exist=True, is_dir=True + ): for dirname in os.listdir(possible_path): full_path = os.path.join(possible_path, dirname, type_name) # Only add to the known_api_versions if the directory @@ -432,10 +445,12 @@ class Loader(object): if service_name not in known_services: raise UnknownServiceError( service_name=service_name, - known_service_names=', '.join(sorted(known_services))) + known_service_names=', '.join(sorted(known_services)), + ) if api_version is None: api_version = self.determine_latest_version( - service_name, type_name) + service_name, type_name + ) full_path = os.path.join(service_name, api_version, type_name) model = self.load_data(full_path) @@ -448,7 +463,7 @@ class Loader(object): def _find_extras(self, service_name, type_name, api_version): """Creates an iterator over all the extras data.""" for extras_type in self.extras_types: - extras_name = '%s.%s-extras' % (type_name, extras_type) + extras_name = f'{type_name}.{extras_type}-extras' full_path = os.path.join(service_name, api_version, extras_name) try: @@ -486,8 +501,7 @@ class Loader(object): # We didn't find anything that matched on any path. raise DataNotFoundError(data_path=name) - def _potential_locations(self, name=None, must_exist=False, - is_dir=False): + def _potential_locations(self, name=None, must_exist=False, is_dir=False): # Will give an iterator over the full path of potential locations # according to the search path. 
for path in self.search_paths: @@ -504,8 +518,9 @@ class Loader(object): yield full_path -class ExtrasProcessor(object): +class ExtrasProcessor: """Processes data from extras files into service models.""" + def process(self, original_model, extra_models): """Processes data from a list of loaded extras files into a model diff --git a/contrib/python/botocore/py3/botocore/model.py b/contrib/python/botocore/py3/botocore/model.py index fa1e8fe815..88374474e9 100644 --- a/contrib/python/botocore/py3/botocore/model.py +++ b/contrib/python/botocore/py3/botocore/model.py @@ -44,20 +44,47 @@ class ServiceId(str): return hyphenize_service_id(self) -class Shape(object): +class Shape: """Object representing a shape from the service model.""" + # To simplify serialization logic, all shape params that are # related to serialization are moved from the top level hash into # a 'serialization' hash. This list below contains the names of all # the attributes that should be moved. - SERIALIZED_ATTRS = ['locationName', 'queryName', 'flattened', 'location', - 'payload', 'streaming', 'timestampFormat', - 'xmlNamespace', 'resultWrapper', 'xmlAttribute', - 'eventstream', 'event', 'eventheader', 'eventpayload', - 'jsonvalue', 'timestampFormat', 'hostLabel'] - METADATA_ATTRS = ['required', 'min', 'max', 'pattern', 'sensitive', 'enum', - 'idempotencyToken', 'error', 'exception', - 'endpointdiscoveryid', 'retryable', 'document', 'union'] + SERIALIZED_ATTRS = [ + 'locationName', + 'queryName', + 'flattened', + 'location', + 'payload', + 'streaming', + 'timestampFormat', + 'xmlNamespace', + 'resultWrapper', + 'xmlAttribute', + 'eventstream', + 'event', + 'eventheader', + 'eventpayload', + 'jsonvalue', + 'timestampFormat', + 'hostLabel', + ] + METADATA_ATTRS = [ + 'required', + 'min', + 'max', + 'pattern', + 'sensitive', + 'enum', + 'idempotencyToken', + 'error', + 'exception', + 'endpointdiscoveryid', + 'retryable', + 'document', + 'union', + ] MAP_TYPE = OrderedDict def __init__(self, shape_name, shape_model, shape_resolver=None): @@ -169,8 +196,7 @@ class Shape(object): return self._shape_resolver.resolve_shape_ref(shape_ref) def __repr__(self): - return "<%s(%s)>" % (self.__class__.__name__, - self.name) + return f"<{self.__class__.__name__}({self.name})>" @property def event_stream_name(self): @@ -241,7 +267,7 @@ class StringShape(Shape): return self.metadata.get('enum', []) -class ServiceModel(object): +class ServiceModel: """ :ivar service_description: The parsed service description dictionary. @@ -273,14 +299,16 @@ class ServiceModel(object): # We want clients to be able to access metadata directly. 
self.metadata = service_description.get('metadata', {}) self._shape_resolver = ShapeResolver( - service_description.get('shapes', {})) + service_description.get('shapes', {}) + ) self._signature_version = NOT_SET self._service_name = service_name self._instance_cache = {} def shape_for(self, shape_name, member_traits=None): return self._shape_resolver.get_shape_by_name( - shape_name, member_traits) + shape_name, member_traits + ) def shape_for_error_code(self, error_code): return self._error_code_cache.get(error_code, None) @@ -347,9 +375,7 @@ class ServiceModel(object): try: return ServiceId(self._get_metadata_property('serviceId')) except UndefinedModelAttributeError: - raise MissingServiceIdError( - service_name=self._service_name - ) + raise MissingServiceIdError(service_name=self._service_name) @CachedProperty def signing_name(self): @@ -398,8 +424,8 @@ class ServiceModel(object): return self.metadata[name] except KeyError: raise UndefinedModelAttributeError( - '"%s" not defined in the metadata of the model: %s' % - (name, self)) + f'"{name}" not defined in the metadata of the model: {self}' + ) # Signature version is one of the rare properties # than can be modified so a CachedProperty is not used here. @@ -416,10 +442,10 @@ class ServiceModel(object): self._signature_version = value def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, self.service_name) + return f'{self.__class__.__name__}({self.service_name})' -class OperationModel(object): +class OperationModel: def __init__(self, operation_model, service_model, name=None): """ @@ -507,7 +533,8 @@ class OperationModel(object): # input shape. return None return self._service_model.resolve_shape_ref( - self._operation_model['input']) + self._operation_model['input'] + ) @CachedProperty def output_shape(self): @@ -517,7 +544,8 @@ class OperationModel(object): # operation has no expected output. return None return self._service_model.resolve_shape_ref( - self._operation_model['output']) + self._operation_model['output'] + ) @CachedProperty def idempotent_members(self): @@ -526,7 +554,8 @@ class OperationModel(object): return [] return [ - name for (name, shape) in input_shape.members.items() + name + for (name, shape) in input_shape.members.items() if 'idempotencyToken' in shape.metadata and shape.metadata['idempotencyToken'] ] @@ -601,10 +630,10 @@ class OperationModel(object): return None def __repr__(self): - return '%s(name=%s)' % (self.__class__.__name__, self.name) + return f'{self.__class__.__name__}(name={self.name})' -class ShapeResolver(object): +class ShapeResolver: """Resolves shape references.""" # Any type not in this mapping will default to the Shape class. 
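
# --- illustration, not part of the diff: a sketch of how the ShapeResolver
# class shown here is driven. The shape map mirrors the JSON service models
# shipped under botocore/data/; the two shape names are invented, and the
# snippet assumes botocore is importable.
from botocore.model import ShapeResolver

shape_map = {
    'PutRequest': {
        'type': 'structure',
        'members': {'Bucket': {'shape': 'BucketName'}},
    },
    'BucketName': {'type': 'string', 'min': 3, 'max': 63},
}

resolver = ShapeResolver(shape_map)
shape = resolver.get_shape_by_name('PutRequest')
# 'structure' maps to StructureShape and 'string' to StringShape via the
# SHAPE_CLASSES table below; any other type falls back to the base Shape.
print(type(shape).__name__)                     # StructureShape
print(shape.members['Bucket'].metadata['max'])  # 63, a METADATA_ATTRS trait
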
@@ -612,7 +641,7 @@ class ShapeResolver(object): 'structure': StructureShape, 'list': ListShape, 'map': MapShape, - 'string': StringShape + 'string': StringShape, } def __init__(self, shape_map): @@ -627,8 +656,9 @@ class ShapeResolver(object): try: shape_cls = self.SHAPE_CLASSES.get(shape_model['type'], Shape) except KeyError: - raise InvalidShapeError("Shape is missing required key 'type': %s" - % shape_model) + raise InvalidShapeError( + f"Shape is missing required key 'type': {shape_model}" + ) if member_traits: shape_model = shape_model.copy() shape_model.update(member_traits) @@ -652,23 +682,27 @@ class ShapeResolver(object): shape_name = member_traits.pop('shape') except KeyError: raise InvalidShapeReferenceError( - "Invalid model, missing shape reference: %s" % shape_ref) + f"Invalid model, missing shape reference: {shape_ref}" + ) return self.get_shape_by_name(shape_name, member_traits) -class UnresolvableShapeMap(object): - """A ShapeResolver that will throw ValueErrors when shapes are resolved. - """ +class UnresolvableShapeMap: + """A ShapeResolver that will throw ValueErrors when shapes are resolved.""" + def get_shape_by_name(self, shape_name, member_traits=None): - raise ValueError("Attempted to lookup shape '%s', but no shape " - "map was provided.") + raise ValueError( + f"Attempted to lookup shape '{shape_name}', but no shape map was provided." + ) def resolve_shape_ref(self, shape_ref): - raise ValueError("Attempted to resolve shape '%s', but no shape " - "map was provided.") + raise ValueError( + f"Attempted to resolve shape '{shape_ref}', but no shape " + f"map was provided." + ) -class DenormalizedStructureBuilder(object): +class DenormalizedStructureBuilder: """Build a StructureShape from a denormalized model. This is a convenience builder class that makes it easy to construct @@ -704,6 +738,19 @@ class DenormalizedStructureBuilder(object): matters, such as for documentation. """ + + SCALAR_TYPES = ( + 'string', + 'integer', + 'boolean', + 'blob', + 'float', + 'timestamp', + 'long', + 'double', + 'char', + ) + def __init__(self, name=None): self.members = OrderedDict() self._name_generator = ShapeNameGenerator() @@ -736,9 +783,11 @@ class DenormalizedStructureBuilder(object): } self._build_model(denormalized, shapes, self.name) resolver = ShapeResolver(shape_map=shapes) - return StructureShape(shape_name=self.name, - shape_model=shapes[self.name], - shape_resolver=resolver) + return StructureShape( + shape_name=self.name, + shape_model=shapes[self.name], + shape_resolver=resolver, + ) def _build_model(self, model, shapes, shape_name): if model['type'] == 'structure': @@ -747,11 +796,10 @@ class DenormalizedStructureBuilder(object): shapes[shape_name] = self._build_list(model, shapes) elif model['type'] == 'map': shapes[shape_name] = self._build_map(model, shapes) - elif model['type'] in ['string', 'integer', 'boolean', 'blob', 'float', - 'timestamp', 'long', 'double', 'char']: + elif model['type'] in self.SCALAR_TYPES: shapes[shape_name] = self._build_scalar(model) else: - raise InvalidShapeError("Unknown shape type: %s" % model['type']) + raise InvalidShapeError(f"Unknown shape type: {model['type']}") def _build_structure(self, model, shapes): members = OrderedDict() @@ -802,13 +850,14 @@ class DenormalizedStructureBuilder(object): return self._name_generator.new_shape_name(model['type']) -class ShapeNameGenerator(object): +class ShapeNameGenerator: """Generate unique shape names for a type. 
This class can be used in conjunction with the DenormalizedStructureBuilder to generate unique shape names for a given type. """ + def __init__(self): self._name_cache = defaultdict(int) @@ -840,5 +889,4 @@ class ShapeNameGenerator(object): """ self._name_cache[type_name] += 1 current_index = self._name_cache[type_name] - return '%sType%s' % (type_name.capitalize(), - current_index) + return f'{type_name.capitalize()}Type{current_index}' diff --git a/contrib/python/botocore/py3/botocore/monitoring.py b/contrib/python/botocore/py3/botocore/monitoring.py index 0a01669af0..71d7230246 100644 --- a/contrib/python/botocore/py3/botocore/monitoring.py +++ b/contrib/python/botocore/py3/botocore/monitoring.py @@ -21,7 +21,7 @@ from botocore.retryhandler import EXCEPTION_MAP as RETRYABLE_EXCEPTIONS logger = logging.getLogger(__name__) -class Monitor(object): +class Monitor: _EVENTS_TO_REGISTER = [ 'before-parameter-build', 'request-created', @@ -59,10 +59,13 @@ class Monitor(object): except Exception as e: logger.debug( 'Exception %s raised by client monitor in handling event %s', - e, event_name, exc_info=True) + e, + event_name, + exc_info=True, + ) -class MonitorEventAdapter(object): +class MonitorEventAdapter: def __init__(self, time=time.time): """Adapts event emitter events to produce monitor events @@ -102,21 +105,24 @@ class MonitorEventAdapter(object): def _handle_request_created(self, request, **kwargs): context = request.context new_attempt_event = context[ - 'current_api_call_event'].new_api_call_attempt( - timestamp=self._get_current_time()) + 'current_api_call_event' + ].new_api_call_attempt(timestamp=self._get_current_time()) new_attempt_event.request_headers = request.headers new_attempt_event.url = request.url context['current_api_call_attempt_event'] = new_attempt_event - def _handle_response_received(self, parsed_response, context, exception, - **kwargs): + def _handle_response_received( + self, parsed_response, context, exception, **kwargs + ): attempt_event = context.pop('current_api_call_attempt_event') attempt_event.latency = self._get_latency(attempt_event) if parsed_response is not None: attempt_event.http_status_code = parsed_response[ - 'ResponseMetadata']['HTTPStatusCode'] + 'ResponseMetadata' + ]['HTTPStatusCode'] attempt_event.response_headers = parsed_response[ - 'ResponseMetadata']['HTTPHeaders'] + 'ResponseMetadata' + ]['HTTPHeaders'] attempt_event.parsed_error = parsed_response.get('Error') else: attempt_event.wire_exception = exception @@ -124,7 +130,8 @@ class MonitorEventAdapter(object): def _handle_after_call(self, context, parsed, **kwargs): context['current_api_call_event'].retries_exceeded = parsed[ - 'ResponseMetadata'].get('MaxAttemptsReached', False) + 'ResponseMetadata' + ].get('MaxAttemptsReached', False) return self._complete_api_call(context) def _handle_after_call_error(self, context, exception, **kwargs): @@ -132,13 +139,15 @@ class MonitorEventAdapter(object): # was a retryable connection error, then the retries must have exceeded # for that exception as this event gets emitted **after** retries # happen. 
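For reference, the adapter above only builds and mutates plain event objects. A minimal sketch of that flow using the classes defined later in this file (service and operation names invented; timestamps are in milliseconds, matching _get_current_time()):

    import time

    from botocore.monitoring import APICallEvent

    now_ms = int(time.time() * 1000)
    call = APICallEvent(service='S3', operation='GetObject', timestamp=now_ms)

    # one attempt per HTTP request, as in _handle_request_created()
    attempt = call.new_api_call_attempt(timestamp=int(time.time() * 1000))
    attempt.http_status_code = 200
    call.latency = int(time.time() * 1000) - call.timestamp
    print(len(call.attempts))  # 1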
- context['current_api_call_event'].retries_exceeded = \ - self._is_retryable_exception(exception) + context[ + 'current_api_call_event' + ].retries_exceeded = self._is_retryable_exception(exception) return self._complete_api_call(context) def _is_retryable_exception(self, exception): return isinstance( - exception, tuple(RETRYABLE_EXCEPTIONS['GENERAL_CONNECTION_ERROR'])) + exception, tuple(RETRYABLE_EXCEPTIONS['GENERAL_CONNECTION_ERROR']) + ) def _complete_api_call(self, context): call_event = context.pop('current_api_call_event') @@ -152,7 +161,7 @@ class MonitorEventAdapter(object): return int(self._time() * 1000) -class BaseMonitorEvent(object): +class BaseMonitorEvent: def __init__(self, service, operation, timestamp): """Base monitor event @@ -172,7 +181,7 @@ class BaseMonitorEvent(object): self.timestamp = timestamp def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self.__dict__) + return f'{self.__class__.__name__}({self.__dict__!r})' def __eq__(self, other): if isinstance(other, self.__class__): @@ -181,8 +190,15 @@ class BaseMonitorEvent(object): class APICallEvent(BaseMonitorEvent): - def __init__(self, service, operation, timestamp, latency=None, - attempts=None, retries_exceeded=False): + def __init__( + self, + service, + operation, + timestamp, + latency=None, + attempts=None, + retries_exceeded=False, + ): """Monitor event for a single API call This event corresponds to a single client method call, which includes @@ -210,8 +226,9 @@ class APICallEvent(BaseMonitorEvent): :param retries_exceeded: True if API call exceeded retries. False otherwise """ - super(APICallEvent, self).__init__( - service=service, operation=operation, timestamp=timestamp) + super().__init__( + service=service, operation=operation, timestamp=timestamp + ) self.latency = latency self.attempts = attempts if attempts is None: @@ -226,19 +243,26 @@ class APICallEvent(BaseMonitorEvent): APICallAttemptEvent """ attempt_event = APICallAttemptEvent( - service=self.service, - operation=self.operation, - timestamp=timestamp + service=self.service, operation=self.operation, timestamp=timestamp ) self.attempts.append(attempt_event) return attempt_event class APICallAttemptEvent(BaseMonitorEvent): - def __init__(self, service, operation, timestamp, - latency=None, url=None, http_status_code=None, - request_headers=None, response_headers=None, - parsed_error=None, wire_exception=None): + def __init__( + self, + service, + operation, + timestamp, + latency=None, + url=None, + http_status_code=None, + request_headers=None, + response_headers=None, + parsed_error=None, + wire_exception=None, + ): """Monitor event for a single API call attempt This event corresponds to a single HTTP request attempt in completing @@ -283,7 +307,7 @@ class APICallAttemptEvent(BaseMonitorEvent): :param wire_exception: The exception raised in sending the HTTP request (i.e. 
ConnectionError) """ - super(APICallAttemptEvent, self).__init__( + super().__init__( service=service, operation=operation, timestamp=timestamp ) self.latency = latency @@ -295,7 +319,7 @@ class APICallAttemptEvent(BaseMonitorEvent): self.wire_exception = wire_exception -class CSMSerializer(object): +class CSMSerializer: _MAX_CLIENT_ID_LENGTH = 255 _MAX_EXCEPTION_CLASS_LENGTH = 128 _MAX_ERROR_CODE_LENGTH = 128 @@ -312,9 +336,7 @@ class CSMSerializer(object): r'Credential=(?P<access_key>\w+)/\d+/' r'(?P<signing_region>[a-z0-9-]+)/' ), - 's3': re.compile( - r'AWS (?P<access_key>\w+):' - ) + 's3': re.compile(r'AWS (?P<access_key>\w+):'), } _SERIALIZEABLE_EVENT_PROPERTIES = [ 'service', @@ -344,9 +366,8 @@ class CSMSerializer(object): def _validate_client_id(self, csm_client_id): if len(csm_client_id) > self._MAX_CLIENT_ID_LENGTH: raise ValueError( - 'The value provided for csm_client_id: %s exceeds the ' - 'maximum length of %s characters' % ( - csm_client_id, self._MAX_CLIENT_ID_LENGTH) + f'The value provided for csm_client_id: {csm_client_id} exceeds ' + f'the maximum length of {self._MAX_CLIENT_ID_LENGTH} characters' ) def serialize(self, event): @@ -365,9 +386,9 @@ class CSMSerializer(object): value = getattr(event, attr, None) if value is not None: getattr(self, '_serialize_' + attr)( - value, event_dict, event_type=event_type) - return ensure_bytes( - json.dumps(event_dict, separators=(',', ':'))) + value, event_dict, event_type=event_type + ) + return ensure_bytes(json.dumps(event_dict, separators=(',', ':'))) def _get_base_event_dict(self, event): return { @@ -397,15 +418,18 @@ class CSMSerializer(object): if region is not None: event_dict['Region'] = region event_dict['UserAgent'] = self._get_user_agent( - last_attempt.request_headers) + last_attempt.request_headers + ) if last_attempt.http_status_code is not None: event_dict['FinalHttpStatusCode'] = last_attempt.http_status_code if last_attempt.parsed_error is not None: self._serialize_parsed_error( - last_attempt.parsed_error, event_dict, 'ApiCall') + last_attempt.parsed_error, event_dict, 'ApiCall' + ) if last_attempt.wire_exception is not None: self._serialize_wire_exception( - last_attempt.wire_exception, event_dict, 'ApiCall') + last_attempt.wire_exception, event_dict, 'ApiCall' + ) def _serialize_latency(self, latency, event_dict, event_type): if event_type == 'ApiCall': @@ -413,15 +437,17 @@ class CSMSerializer(object): elif event_type == 'ApiCallAttempt': event_dict['AttemptLatency'] = latency - def _serialize_retries_exceeded(self, retries_exceeded, event_dict, - **kwargs): - event_dict['MaxRetriesExceeded'] = (1 if retries_exceeded else 0) + def _serialize_retries_exceeded( + self, retries_exceeded, event_dict, **kwargs + ): + event_dict['MaxRetriesExceeded'] = 1 if retries_exceeded else 0 def _serialize_url(self, url, event_dict, **kwargs): event_dict['Fqdn'] = urlparse(url).netloc - def _serialize_request_headers(self, request_headers, event_dict, - **kwargs): + def _serialize_request_headers( + self, request_headers, event_dict, **kwargs + ): event_dict['UserAgent'] = self._get_user_agent(request_headers) if self._is_signed(request_headers): event_dict['AccessKey'] = self._get_access_key(request_headers) @@ -430,34 +456,42 @@ class CSMSerializer(object): event_dict['Region'] = region if 'X-Amz-Security-Token' in request_headers: event_dict['SessionToken'] = request_headers[ - 'X-Amz-Security-Token'] + 'X-Amz-Security-Token' + ] - def _serialize_http_status_code(self, http_status_code, event_dict, - **kwargs): + def 
_serialize_http_status_code( + self, http_status_code, event_dict, **kwargs + ): event_dict['HttpStatusCode'] = http_status_code - def _serialize_response_headers(self, response_headers, event_dict, - **kwargs): + def _serialize_response_headers( + self, response_headers, event_dict, **kwargs + ): for header, entry in self._RESPONSE_HEADERS_TO_EVENT_ENTRIES.items(): if header in response_headers: event_dict[entry] = response_headers[header] - def _serialize_parsed_error(self, parsed_error, event_dict, event_type, - **kwargs): + def _serialize_parsed_error( + self, parsed_error, event_dict, event_type, **kwargs + ): field_prefix = 'Final' if event_type == 'ApiCall' else '' event_dict[field_prefix + 'AwsException'] = self._truncate( - parsed_error['Code'], self._MAX_ERROR_CODE_LENGTH) + parsed_error['Code'], self._MAX_ERROR_CODE_LENGTH + ) event_dict[field_prefix + 'AwsExceptionMessage'] = self._truncate( - parsed_error['Message'], self._MAX_MESSAGE_LENGTH) + parsed_error['Message'], self._MAX_MESSAGE_LENGTH + ) - def _serialize_wire_exception(self, wire_exception, event_dict, event_type, - **kwargs): + def _serialize_wire_exception( + self, wire_exception, event_dict, event_type, **kwargs + ): field_prefix = 'Final' if event_type == 'ApiCall' else '' event_dict[field_prefix + 'SdkException'] = self._truncate( - wire_exception.__class__.__name__, - self._MAX_EXCEPTION_CLASS_LENGTH) + wire_exception.__class__.__name__, self._MAX_EXCEPTION_CLASS_LENGTH + ) event_dict[field_prefix + 'SdkExceptionMessage'] = self._truncate( - str(wire_exception), self._MAX_MESSAGE_LENGTH) + str(wire_exception), self._MAX_MESSAGE_LENGTH + ) def _get_event_type(self, event): if isinstance(event, APICallEvent): @@ -482,7 +516,7 @@ class CSMSerializer(object): def _get_user_agent(self, request_headers): return self._truncate( ensure_unicode(request_headers.get('User-Agent', '')), - self._MAX_USER_AGENT_LENGTH + self._MAX_USER_AGENT_LENGTH, ) def _is_signed(self, request_headers): @@ -501,13 +535,15 @@ class CSMSerializer(object): def _truncate(self, text, max_length): if len(text) > max_length: logger.debug( - 'Truncating following value to maximum length of ' - '%s: %s', text, max_length) + 'Truncating following value to maximum length of ' '%s: %s', + text, + max_length, + ) return text[:max_length] return text -class SocketPublisher(object): +class SocketPublisher: _MAX_MONITOR_EVENT_LENGTH = 8 * 1024 def __init__(self, socket, host, port, serializer): @@ -543,7 +579,8 @@ class SocketPublisher(object): logger.debug( 'Serialized event of size %s exceeds the maximum length ' 'allowed: %s. Not sending event to socket.', - len(serialized_event), self._MAX_MONITOR_EVENT_LENGTH + len(serialized_event), + self._MAX_MONITOR_EVENT_LENGTH, ) return self._socket.sendto(serialized_event, self._address) diff --git a/contrib/python/botocore/py3/botocore/paginate.py b/contrib/python/botocore/py3/botocore/paginate.py index ded0d5e24f..2678ef8432 100644 --- a/contrib/python/botocore/py3/botocore/paginate.py +++ b/contrib/python/botocore/py3/botocore/paginate.py @@ -18,14 +18,13 @@ from itertools import tee import jmespath -from botocore.compat import six, zip from botocore.exceptions import PaginationError from botocore.utils import merge_dicts, set_value_from_jmespath log = logging.getLogger(__name__) -class TokenEncoder(object): +class TokenEncoder: """Encodes dictionaries into opaque strings. 
This for the most part json dumps + base64 encoding, but also supports @@ -71,7 +70,7 @@ class TokenEncoder(object): return self._encode_dict(data, path) elif isinstance(data, list): return self._encode_list(data, path) - elif isinstance(data, six.binary_type): + elif isinstance(data, bytes): return self._encode_bytes(data, path) else: return data, [] @@ -103,7 +102,7 @@ class TokenEncoder(object): return base64.b64encode(data).decode('utf-8'), [path] -class TokenDecoder(object): +class TokenDecoder: """Decodes token strings back into dictionaries. This performs the inverse operation to the TokenEncoder, accepting @@ -171,7 +170,7 @@ class TokenDecoder(object): container[path[-1]] = value -class PaginatorModel(object): +class PaginatorModel: def __init__(self, paginator_config): self._paginator_config = paginator_config['pagination'] @@ -179,15 +178,27 @@ class PaginatorModel(object): try: single_paginator_config = self._paginator_config[operation_name] except KeyError: - raise ValueError("Paginator for operation does not exist: %s" - % operation_name) + raise ValueError( + "Paginator for operation does not exist: %s" % operation_name + ) return single_paginator_config -class PageIterator(object): - def __init__(self, method, input_token, output_token, more_results, - result_keys, non_aggregate_keys, limit_key, max_items, - starting_token, page_size, op_kwargs): +class PageIterator: + def __init__( + self, + method, + input_token, + output_token, + more_results, + result_keys, + non_aggregate_keys, + limit_key, + max_items, + starting_token, + page_size, + op_kwargs, + ): self._method = method self._input_token = input_token self._output_token = output_token @@ -236,7 +247,7 @@ class PageIterator(object): def __iter__(self): current_kwargs = self._op_kwargs previous_next_token = None - next_token = dict((key, None) for key in self._input_token) + next_token = {key: None for key in self._input_token} if self._starting_token is not None: # If the starting token exists, populate the next_token with the # values inside it. This ensures that we have the service's @@ -258,7 +269,8 @@ class PageIterator(object): # to index into the retrieved page. if self._starting_token is not None: starting_truncation = self._handle_first_request( - parsed, primary_result_key, starting_truncation) + parsed, primary_result_key, starting_truncation + ) first_request = False self._record_non_aggregate_key_values(parsed) else: @@ -277,8 +289,11 @@ class PageIterator(object): ) if truncate_amount > 0: self._truncate_response( - parsed, primary_result_key, truncate_amount, - starting_truncation, next_token + parsed, + primary_result_key, + truncate_amount, + starting_truncation, + next_token, ) yield response break @@ -288,16 +303,22 @@ class PageIterator(object): next_token = self._get_next_token(parsed) if all(t is None for t in next_token.values()): break - if self._max_items is not None and \ - total_items == self._max_items: + if ( + self._max_items is not None + and total_items == self._max_items + ): # We're on a page boundary so we can set the current # next token to be the resume token. 
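For reference, the next_token dict threaded through __iter__ is what the TokenEncoder/TokenDecoder pair earlier in this file turns into the opaque strings users see. A quick round trip (token contents invented):

    from botocore.paginate import TokenDecoder, TokenEncoder

    token = {'NextMarker': 'key-042', 'boto_truncate_amount': 2}
    opaque = TokenEncoder().encode(token)  # base64-encoded JSON
    assert TokenDecoder().decode(opaque) == token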
self.resume_token = next_token break - if previous_next_token is not None and \ - previous_next_token == next_token: - message = ("The same next token was received " - "twice: %s" % next_token) + if ( + previous_next_token is not None + and previous_next_token == next_token + ): + message = ( + f"The same next token was received " + f"twice: {next_token}" + ) raise PaginationError(message=message) self._inject_token_into_kwargs(current_kwargs, next_token) previous_next_token = next_token @@ -322,8 +343,7 @@ class PageIterator(object): for page in self: results = compiled.search(page) if isinstance(results, list): - for element in results: - yield element + yield from results else: # Yield result directly if it is not a list. yield results @@ -338,9 +358,9 @@ class PageIterator(object): non_aggregate_keys = {} for expression in self._non_aggregate_key_exprs: result = expression.search(response) - set_value_from_jmespath(non_aggregate_keys, - expression.expression, - result) + set_value_from_jmespath( + non_aggregate_keys, expression.expression, result + ) self._non_aggregate_part = non_aggregate_keys def _inject_starting_params(self, op_kwargs): @@ -363,21 +383,18 @@ class PageIterator(object): elif name in op_kwargs: del op_kwargs[name] - def _handle_first_request(self, parsed, primary_result_key, - starting_truncation): + def _handle_first_request( + self, parsed, primary_result_key, starting_truncation + ): # If the payload is an array or string, we need to slice into it # and only return the truncated amount. starting_truncation = self._parse_starting_token()[1] all_data = primary_result_key.search(parsed) - if isinstance(all_data, (list, six.string_types)): + if isinstance(all_data, (list, str)): data = all_data[starting_truncation:] else: data = None - set_value_from_jmespath( - parsed, - primary_result_key.expression, - data - ) + set_value_from_jmespath(parsed, primary_result_key.expression, data) # We also need to truncate any secondary result keys # because they were not truncated in the previous last # response. @@ -387,7 +404,7 @@ class PageIterator(object): sample = token.search(parsed) if isinstance(sample, list): empty_value = [] - elif isinstance(sample, six.string_types): + elif isinstance(sample, str): empty_value = '' elif isinstance(sample, (int, float)): empty_value = 0 @@ -396,17 +413,21 @@ class PageIterator(object): set_value_from_jmespath(parsed, token.expression, empty_value) return starting_truncation - def _truncate_response(self, parsed, primary_result_key, truncate_amount, - starting_truncation, next_token): + def _truncate_response( + self, + parsed, + primary_result_key, + truncate_amount, + starting_truncation, + next_token, + ): original = primary_result_key.search(parsed) if original is None: original = [] amount_to_keep = len(original) - truncate_amount truncated = original[:amount_to_keep] set_value_from_jmespath( - parsed, - primary_result_key.expression, - truncated + parsed, primary_result_key.expression, truncated ) # The issue here is that even though we know how much we've truncated # we need to account for this globally including any starting @@ -419,8 +440,9 @@ class PageIterator(object): # However, even though we only kept 1, this is post # left truncation so the next starting index should be 2, not 1 # (left_truncation + amount_to_keep). 
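For reference, this truncation bookkeeping is what makes MaxItems resumable from boto3. A hypothetical usage sketch (bucket name invented, AWS credentials assumed to be configured):

    import boto3

    s3 = boto3.client('s3')
    pages = s3.get_paginator('list_objects_v2').paginate(
        Bucket='example-bucket',
        PaginationConfig={'MaxItems': 3, 'PageSize': 2},
    )
    result = pages.build_full_result()
    # 'NextToken' carries the encoded resume token, including any
    # 'boto_truncate_amount' recorded by _truncate_response()
    print(result.get('NextToken'))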
- next_token['boto_truncate_amount'] = \ + next_token['boto_truncate_amount'] = ( amount_to_keep + starting_truncation + ) self.resume_token = next_token def _get_next_token(self, parsed): @@ -428,8 +450,9 @@ class PageIterator(object): if not self._more_results.search(parsed): return {} next_tokens = {} - for output_token, input_key in \ - zip(self._output_token, self._input_token): + for output_token, input_key in zip( + self._output_token, self._input_token + ): next_token = output_token.search(parsed) # We do not want to include any empty strings as actual tokens. # Treat them as None. @@ -441,8 +464,10 @@ class PageIterator(object): def result_key_iters(self): teed_results = tee(self, len(self.result_keys)) - return [ResultKeyIterator(i, result_key) for i, result_key - in zip(teed_results, self.result_keys)] + return [ + ResultKeyIterator(i, result_key) + for i, result_key in zip(teed_results, self.result_keys) + ] def build_full_result(self): complete_result = {} @@ -473,17 +498,21 @@ class PageIterator(object): if existing_value is None: # Set the initial result set_value_from_jmespath( - complete_result, result_expression.expression, - result_value) + complete_result, + result_expression.expression, + result_value, + ) continue # Now both result_value and existing_value contain something if isinstance(result_value, list): existing_value.extend(result_value) - elif isinstance(result_value, (int, float, six.string_types)): + elif isinstance(result_value, (int, float, str)): # Modify the existing result with the sum or concatenation set_value_from_jmespath( - complete_result, result_expression.expression, - existing_value + result_value) + complete_result, + result_expression.expression, + existing_value + result_value, + ) merge_dicts(complete_result, self.non_aggregate_part) if self.resume_token is not None: complete_result['NextToken'] = self.resume_token @@ -510,8 +539,10 @@ class PageIterator(object): This handles parsing of old style starting tokens, and attempts to coerce them into the new style. """ - log.debug("Attempting to fall back to old starting token parser. For " - "token: %s" % self._starting_token) + log.debug( + "Attempting to fall back to old starting token parser. For " + "token: %s" % self._starting_token + ) if self._starting_token is None: return None @@ -543,14 +574,16 @@ class PageIterator(object): if len_deprecated_token > len_input_token: raise ValueError("Bad starting token: %s" % self._starting_token) elif len_deprecated_token < len_input_token: - log.debug("Old format starting token does not contain all input " - "tokens. Setting the rest, in order, as None.") + log.debug( + "Old format starting token does not contain all input " + "tokens. Setting the rest, in order, as None." 
+ ) for i in range(len_input_token - len_deprecated_token): deprecated_token.append(None) return dict(zip(self._input_token, deprecated_token)) -class Paginator(object): +class Paginator: PAGE_ITERATOR_CLS = PageIterator def __init__(self, method, pagination_config, model): @@ -561,7 +594,8 @@ class Paginator(object): self._input_token = self._get_input_tokens(self._pagination_cfg) self._more_results = self._get_more_results_token(self._pagination_cfg) self._non_aggregate_keys = self._get_non_aggregate_keys( - self._pagination_cfg) + self._pagination_cfg + ) self._result_keys = self._get_result_keys(self._pagination_cfg) self._limit_key = self._get_limit_key(self._pagination_cfg) @@ -616,14 +650,18 @@ class Paginator(object): """ page_params = self._extract_paging_params(kwargs) return self.PAGE_ITERATOR_CLS( - self._method, self._input_token, - self._output_token, self._more_results, - self._result_keys, self._non_aggregate_keys, + self._method, + self._input_token, + self._output_token, + self._more_results, + self._result_keys, + self._non_aggregate_keys, self._limit_key, page_params['MaxItems'], page_params['StartingToken'], page_params['PageSize'], - kwargs) + kwargs, + ) def _extract_paging_params(self, kwargs): pagination_config = kwargs.pop('PaginationConfig', {}) @@ -635,11 +673,12 @@ class Paginator(object): if self._limit_key is None: raise PaginationError( message="PageSize parameter is not supported for the " - "pagination interface for this operation.") + "pagination interface for this operation." + ) input_members = self._model.input_shape.members limit_key_shape = input_members.get(self._limit_key) if limit_key_shape.type_name == 'string': - if not isinstance(page_size, six.string_types): + if not isinstance(page_size, str): page_size = str(page_size) else: page_size = int(page_size) @@ -650,7 +689,7 @@ class Paginator(object): } -class ResultKeyIterator(object): +class ResultKeyIterator: """Iterates over the results of paginated responses. Each iterator is associated with a single result key. @@ -673,5 +712,4 @@ class ResultKeyIterator(object): results = self.result_key.search(page) if results is None: results = [] - for result in results: - yield result + yield from results diff --git a/contrib/python/botocore/py3/botocore/parsers.py b/contrib/python/botocore/py3/botocore/parsers.py index 02c913fae1..01c74ab088 100644 --- a/contrib/python/botocore/py3/botocore/parsers.py +++ b/contrib/python/botocore/py3/botocore/parsers.py @@ -133,7 +133,7 @@ LOG = logging.getLogger(__name__) DEFAULT_TIMESTAMP_PARSER = parse_timestamp -class ResponseParserFactory(object): +class ResponseParserFactory: def __init__(self): self._defaults = {} @@ -175,6 +175,7 @@ def _text_content(func): else: text = node_or_string return func(self, shape, text) + return _get_text_content @@ -182,7 +183,7 @@ class ResponseParserError(Exception): pass -class ResponseParser(object): +class ResponseParser: """Base class for response parsing. This class represents the interface that all ResponseParsers for the @@ -195,6 +196,7 @@ class ResponseParser(object): docstring for more info. """ + DEFAULT_ENCODING = 'utf-8' EVENT_STREAM_PARSER_CLS = None @@ -208,7 +210,8 @@ class ResponseParser(object): self._event_stream_parser = None if self.EVENT_STREAM_PARSER_CLS is not None: self._event_stream_parser = self.EVENT_STREAM_PARSER_CLS( - timestamp_parser, blob_parser) + timestamp_parser, blob_parser + ) def _default_blob_parser(self, value): # Blobs are always returned as bytes type (this matters on python3). 
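For reference, the factory refactored above is the usual entry point into this module. A minimal sketch using the json protocol parser with no output shape (the request id is invented):

    from botocore import parsers

    parser = parsers.create_parser('json')
    response = {
        'status_code': 200,
        'headers': {'x-amzn-requestid': 'req-1'},
        'body': b'{}',
    }
    parsed = parser.parse(response, shape=None)
    print(parsed['ResponseMetadata']['RequestId'])  # req-1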
@@ -296,12 +299,17 @@ class ResponseParser(object): def _do_generic_error_parse(self, response): # There's not really much we can do when we get a generic # html response. - LOG.debug("Received a non protocol specific error response from the " - "service, unable to populate error code and message.") + LOG.debug( + "Received a non protocol specific error response from the " + "service, unable to populate error code and message." + ) return { - 'Error': {'Code': str(response['status_code']), - 'Message': six.moves.http_client.responses.get( - response['status_code'], '')}, + 'Error': { + 'Code': str(response['status_code']), + 'Message': six.moves.http_client.responses.get( + response['status_code'], '' + ), + }, 'ResponseMetadata': {}, } @@ -309,16 +317,17 @@ class ResponseParser(object): raise NotImplementedError("%s._do_parse" % self.__class__.__name__) def _do_error_parse(self, response, shape): - raise NotImplementedError( - "%s._do_error_parse" % self.__class__.__name__) + raise NotImplementedError(f"{self.__class__.__name__}._do_error_parse") def _do_modeled_error_parse(self, response, shape, parsed): raise NotImplementedError( - "%s._do_modeled_error_parse" % self.__class__.__name__) + f"{self.__class__.__name__}._do_modeled_error_parse" + ) def _parse_shape(self, shape, node): - handler = getattr(self, '_handle_%s' % shape.type_name, - self._default_handle) + handler = getattr( + self, f'_handle_{shape.type_name}', self._default_handle + ) return handler(shape, node) def _handle_list(self, shape, node): @@ -366,8 +375,7 @@ class ResponseParser(object): class BaseXMLResponseParser(ResponseParser): def __init__(self, timestamp_parser=None, blob_parser=None): - super(BaseXMLResponseParser, self).__init__(timestamp_parser, - blob_parser) + super().__init__(timestamp_parser, blob_parser) self._namespace_re = re.compile('{.*}') def _handle_map(self, shape, node): @@ -402,7 +410,7 @@ class BaseXMLResponseParser(ResponseParser): # it's flattened, and if it's not, then we make it a one element list. if shape.serialization.get('flattened') and not isinstance(node, list): node = [node] - return super(BaseXMLResponseParser, self)._handle_list(shape, node) + return super()._handle_list(shape, node) def _handle_structure(self, shape, node): parsed = {} @@ -415,8 +423,10 @@ class BaseXMLResponseParser(ResponseParser): return self._handle_unknown_tagged_union_member(tag) for member_name in members: member_shape = members[member_name] - if 'location' in member_shape.serialization or \ - member_shape.serialization.get('eventheader'): + if ( + 'location' in member_shape.serialization + or member_shape.serialization.get('eventheader') + ): # All members with locations have already been handled, # so we don't need to parse these members. continue @@ -424,13 +434,15 @@ class BaseXMLResponseParser(ResponseParser): member_node = xml_dict.get(xml_name) if member_node is not None: parsed[member_name] = self._parse_shape( - member_shape, member_node) + member_shape, member_node + ) elif member_shape.serialization.get('xmlAttribute'): attribs = {} location_name = member_shape.serialization['name'] for key, value in node.attrib.items(): new_key = self._namespace_re.sub( - location_name.split(':')[0] + ':', key) + location_name.split(':')[0] + ':', key + ) attribs[new_key] = value if location_name in attribs: parsed[member_name] = attribs[location_name] @@ -450,7 +462,8 @@ class BaseXMLResponseParser(ResponseParser): # surrounding structure. 
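For reference, the XML structure handling above composes with the DenormalizedStructureBuilder from model.py. A sketch parsing a tiny rest-xml body against a hand-built shape (element and member names invented):

    from botocore import parsers
    from botocore.model import DenormalizedStructureBuilder

    shape = DenormalizedStructureBuilder().with_members({
        'Name': {'type': 'string'},
        'Count': {'type': 'integer'},
    }).build_model()

    parser = parsers.create_parser('rest-xml')
    response = {
        'status_code': 200,
        'headers': {},
        'body': b'<Out><Name>demo</Name><Count>2</Count></Out>',
    }
    parsed = parser.parse(response, shape)
    print(parsed['Name'], parsed['Count'])  # demo 2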
if shape.type_name == 'list' and shape.serialization.get('flattened'): list_member_serialized_name = shape.member.serialization.get( - 'name') + 'name' + ) if list_member_serialized_name is not None: return list_member_serialized_name serialized_name = shape.serialization.get('name') @@ -484,15 +497,16 @@ class BaseXMLResponseParser(ResponseParser): def _parse_xml_string_to_dom(self, xml_string): try: parser = ETree.XMLParser( - target=ETree.TreeBuilder(), - encoding=self.DEFAULT_ENCODING) + target=ETree.TreeBuilder(), encoding=self.DEFAULT_ENCODING + ) parser.feed(xml_string) root = parser.close() except XMLParseError as e: raise ResponseParserError( "Unable to parse response (%s), " - "invalid XML received. Further retries may succeed:\n%s" % - (e, xml_string)) + "invalid XML received. Further retries may succeed:\n%s" + % (e, xml_string) + ) return root def _replace_nodes(self, parsed): @@ -537,7 +551,6 @@ class BaseXMLResponseParser(ResponseParser): class QueryParser(BaseXMLResponseParser): - def _do_error_parse(self, response, shape): xml_contents = response['body'] root = self._parse_xml_string_to_dom(xml_contents) @@ -568,8 +581,8 @@ class QueryParser(BaseXMLResponseParser): start = root if 'resultWrapper' in shape.serialization: start = self._find_result_wrapped_shape( - shape.serialization['resultWrapper'], - root) + shape.serialization['resultWrapper'], root + ) parsed = self._parse_shape(shape, start) if inject_metadata: self._inject_response_metadata(root, parsed) @@ -590,7 +603,6 @@ class QueryParser(BaseXMLResponseParser): class EC2QueryParser(QueryParser): - def _inject_response_metadata(self, node, inject_into): mapping = self._build_name_to_xml_node(node) child_node = mapping.get('requestId') @@ -610,7 +622,7 @@ class EC2QueryParser(QueryParser): # </Response> # This is different from QueryParser in that it's RequestID, # not RequestId - original = super(EC2QueryParser, self)._do_error_parse(response, shape) + original = super()._do_error_parse(response, shape) if 'RequestID' in original: original['ResponseMetadata'] = { 'RequestId': original.pop('RequestID') @@ -627,7 +639,6 @@ class EC2QueryParser(QueryParser): class BaseJSONParser(ResponseParser): - def _handle_structure(self, shape, value): final_parsed = {} if shape.is_document_type: @@ -649,8 +660,8 @@ class BaseJSONParser(ResponseParser): raw_value = value.get(json_name) if raw_value is not None: final_parsed[member_name] = self._parse_shape( - member_shapes[member_name], - raw_value) + member_shapes[member_name], raw_value + ) return final_parsed def _handle_map(self, shape, value): @@ -680,8 +691,9 @@ class BaseJSONParser(ResponseParser): # The error message can either come in the 'message' or 'Message' key # so we need to check for both. 
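For reference, the message/Message fallback above, together with the '__type' handling in this method, is what produces the familiar Error dict. A sketch with an invented error body:

    from botocore import parsers

    parser = parsers.create_parser('json')
    response = {
        'status_code': 400,
        'headers': {},
        'body': b'{"__type": "ValidationException", "message": "bad input"}',
    }
    error = parser.parse(response, shape=None)
    print(error['Error']['Code'])     # ValidationException
    print(error['Error']['Message'])  # bad input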
- error['Error']['Message'] = body.get('message', - body.get('Message', '')) + error['Error']['Message'] = body.get( + 'message', body.get('Message', '') + ) # if the message did not contain an error code # include the response status code response_code = response.get('status_code') @@ -698,8 +710,9 @@ class BaseJSONParser(ResponseParser): def _inject_response_metadata(self, parsed, headers): if 'x-amzn-requestid' in headers: - parsed.setdefault('ResponseMetadata', {})['RequestId'] = ( - headers['x-amzn-requestid']) + parsed.setdefault('ResponseMetadata', {})['RequestId'] = headers[ + 'x-amzn-requestid' + ] def _parse_body_as_json(self, body_contents): if not body_contents: @@ -715,17 +728,19 @@ class BaseJSONParser(ResponseParser): class BaseEventStreamParser(ResponseParser): - def _do_parse(self, response, shape): final_parsed = {} if shape.serialization.get('eventstream'): event_type = response['headers'].get(':event-type') event_shape = shape.members.get(event_type) if event_shape: - final_parsed[event_type] = self._do_parse(response, event_shape) + final_parsed[event_type] = self._do_parse( + response, event_shape + ) else: - self._parse_non_payload_attrs(response, shape, - shape.members, final_parsed) + self._parse_non_payload_attrs( + response, shape, shape.members, final_parsed + ) self._parse_payload(response, shape, shape.members, final_parsed) return final_parsed @@ -738,7 +753,7 @@ class BaseEventStreamParser(ResponseParser): error = { 'Error': { 'Code': exception_type, - 'Message': body.get('Message', body.get('message', '')) + 'Message': body.get('Message', body.get('message', '')), } } else: @@ -762,7 +777,9 @@ class BaseEventStreamParser(ResponseParser): parsed_body = body.decode(self.DEFAULT_ENCODING) else: raw_parse = self._initial_body_parse(body) - parsed_body = self._parse_shape(member_shape, raw_parse) + parsed_body = self._parse_shape( + member_shape, raw_parse + ) final_parsed[name] = parsed_body return # If we didn't find an explicit payload, use the current shape @@ -770,8 +787,9 @@ class BaseEventStreamParser(ResponseParser): body_parsed = self._parse_shape(shape, original_parsed) final_parsed.update(body_parsed) - def _parse_non_payload_attrs(self, response, shape, - member_shapes, final_parsed): + def _parse_non_payload_attrs( + self, response, shape, member_shapes, final_parsed + ): headers = response['headers'] for name in member_shapes: member_shape = member_shapes[name] @@ -793,13 +811,11 @@ class BaseEventStreamParser(ResponseParser): class EventStreamJSONParser(BaseEventStreamParser, BaseJSONParser): - def _initial_body_parse(self, body_contents): return self._parse_body_as_json(body_contents) class EventStreamXMLParser(BaseEventStreamParser, BaseXMLResponseParser): - def _initial_body_parse(self, xml_string): if not xml_string: return ETree.Element('') @@ -811,6 +827,7 @@ class JSONParser(BaseJSONParser): EVENT_STREAM_PARSER_CLS = EventStreamJSONParser """Response parser for the "json" protocol.""" + def _do_parse(self, response, shape): parsed = {} if shape is not None: @@ -846,11 +863,11 @@ class JSONParser(BaseJSONParser): class BaseRestParser(ResponseParser): - def _do_parse(self, response, shape): final_parsed = {} final_parsed['ResponseMetadata'] = self._populate_response_metadata( - response) + response + ) self._add_modeled_parse(response, shape, final_parsed) return final_parsed @@ -858,8 +875,9 @@ class BaseRestParser(ResponseParser): if shape is None: return final_parsed member_shapes = shape.members - self._parse_non_payload_attrs(response, 
shape, - member_shapes, final_parsed) + self._parse_non_payload_attrs( + response, shape, member_shapes, final_parsed + ) self._parse_payload(response, shape, member_shapes, final_parsed) def _do_modeled_error_parse(self, response, shape): @@ -898,14 +916,16 @@ class BaseRestParser(ResponseParser): else: original_parsed = self._initial_body_parse(response['body']) final_parsed[payload_member_name] = self._parse_shape( - body_shape, original_parsed) + body_shape, original_parsed + ) else: original_parsed = self._initial_body_parse(response['body']) body_parsed = self._parse_shape(shape, original_parsed) final_parsed.update(body_parsed) - def _parse_non_payload_attrs(self, response, shape, - member_shapes, final_parsed): + def _parse_non_payload_attrs( + self, response, shape, member_shapes, final_parsed + ): headers = response['headers'] for name in member_shapes: member_shape = member_shapes[name] @@ -914,15 +934,18 @@ class BaseRestParser(ResponseParser): continue elif location == 'statusCode': final_parsed[name] = self._parse_shape( - member_shape, response['status_code']) + member_shape, response['status_code'] + ) elif location == 'headers': - final_parsed[name] = self._parse_header_map(member_shape, - headers) + final_parsed[name] = self._parse_header_map( + member_shape, headers + ) elif location == 'header': header_name = member_shape.serialization.get('name', name) if header_name in headers: final_parsed[name] = self._parse_shape( - member_shape, headers[header_name]) + member_shape, headers[header_name] + ) def _parse_header_map(self, shape, headers): # Note that headers are case insensitive, so we .lower() @@ -933,7 +956,7 @@ class BaseRestParser(ResponseParser): if header_name.lower().startswith(prefix): # The key name inserted into the parsed hash # strips off the prefix. 
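For reference, this prefix stripping is what turns, for example, S3's x-amz-meta-* headers into a plain metadata map. A standalone sketch of the same logic (header names invented):

    headers = {
        'x-amz-meta-owner': 'data-team',
        'x-amz-meta-stage': 'raw',
        'content-length': '1024',
    }
    prefix = 'x-amz-meta-'
    # headers are case insensitive, hence the .lower() before matching
    metadata = {
        name[len(prefix):]: value
        for name, value in headers.items()
        if name.lower().startswith(prefix)
    }
    print(metadata)  # {'owner': 'data-team', 'stage': 'raw'}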
- name = header_name[len(prefix):] + name = header_name[len(prefix) :] parsed[name] = headers[header_name] return parsed @@ -956,7 +979,7 @@ class BaseRestParser(ResponseParser): if location == 'header' and not isinstance(node, list): # List in headers may be a comma separated string as per RFC7230 node = [e.strip() for e in node.split(',')] - return super(BaseRestParser, self)._handle_list(shape, node) + return super()._handle_list(shape, node) class RestJSONParser(BaseRestParser, BaseJSONParser): @@ -967,7 +990,7 @@ class RestJSONParser(BaseRestParser, BaseJSONParser): return self._parse_body_as_json(body_contents) def _do_error_parse(self, response, shape): - error = super(RestJSONParser, self)._do_error_parse(response, shape) + error = super()._do_error_parse(response, shape) self._inject_error_code(error, response) return error @@ -982,8 +1005,7 @@ class RestJSONParser(BaseRestParser, BaseJSONParser): code = code.split(':')[0] error['Error']['Code'] = code elif 'code' in body or 'Code' in body: - error['Error']['Code'] = body.get( - 'code', body.get('Code', '')) + error['Error']['Code'] = body.get('code', body.get('Code', '')) def _handle_integer(self, shape, value): return int(value) @@ -1023,7 +1045,8 @@ class RestXMLParser(BaseRestParser, BaseXMLResponseParser): except ResponseParserError: LOG.debug( 'Exception caught when parsing error response body:', - exc_info=True) + exc_info=True, + ) return self._parse_error_from_http_status(response) def _parse_error_from_http_status(self, response): @@ -1031,12 +1054,13 @@ class RestXMLParser(BaseRestParser, BaseXMLResponseParser): 'Error': { 'Code': str(response['status_code']), 'Message': six.moves.http_client.responses.get( - response['status_code'], ''), + response['status_code'], '' + ), }, 'ResponseMetadata': { 'RequestId': response['headers'].get('x-amz-request-id', ''), 'HostId': response['headers'].get('x-amz-id-2', ''), - } + }, } def _parse_error_from_body(self, response): @@ -1064,7 +1088,7 @@ class RestXMLParser(BaseRestParser, BaseXMLResponseParser): @_text_content def _handle_string(self, shape, text): - text = super(RestXMLParser, self)._handle_string(shape, text) + text = super()._handle_string(shape, text) return text diff --git a/contrib/python/botocore/py3/botocore/regions.py b/contrib/python/botocore/py3/botocore/regions.py index 9953e2b99a..8b7b9ee737 100644 --- a/contrib/python/botocore/py3/botocore/regions.py +++ b/contrib/python/botocore/py3/botocore/regions.py @@ -26,12 +26,13 @@ from botocore.exceptions import ( ) LOG = logging.getLogger(__name__) -DEFAULT_URI_TEMPLATE = '{service}.{region}.{dnsSuffix}' # noqa +DEFAULT_URI_TEMPLATE = '{service}.{region}.{dnsSuffix}' # noqa DEFAULT_SERVICE_DATA = {'endpoints': {}} -class BaseEndpointResolver(object): +class BaseEndpointResolver: """Resolves regions and endpoints. Must be subclassed.""" + def construct_endpoint(self, service_name, region_name=None): """Resolves an endpoint for a service and region combination. @@ -68,8 +69,9 @@ class BaseEndpointResolver(object): """ raise NotImplementedError - def get_available_endpoints(self, service_name, partition_name='aws', - allow_non_regional=False): + def get_available_endpoints( + self, service_name, partition_name='aws', allow_non_regional=False + ): """Lists the endpoint names of a particular partition. 
:type service_name: string @@ -117,9 +119,13 @@ class EndpointResolver(BaseEndpointResolver): result.append(partition['partition']) return result - def get_available_endpoints(self, service_name, partition_name='aws', - allow_non_regional=False, - endpoint_variant_tags=None): + def get_available_endpoints( + self, + service_name, + partition_name='aws', + allow_non_regional=False, + endpoint_variant_tags=None, + ): result = [] for partition in self._endpoint_data['partitions']: if partition['partition'] != partition_name: @@ -133,8 +139,8 @@ class EndpointResolver(BaseEndpointResolver): # Only regional endpoints can be modeled with variants if endpoint_variant_tags and is_regional_endpoint: variant_data = self._retrieve_variant_data( - service_endpoints[endpoint_name], - endpoint_variant_tags) + service_endpoints[endpoint_name], endpoint_variant_tags + ) if variant_data: result.append(endpoint_name) elif allow_non_regional or is_regional_endpoint: @@ -142,15 +148,14 @@ class EndpointResolver(BaseEndpointResolver): return result def get_partition_dns_suffix( - self, - partition_name, - endpoint_variant_tags=None + self, partition_name, endpoint_variant_tags=None ): for partition in self._endpoint_data['partitions']: if partition['partition'] == partition_name: if endpoint_variant_tags: variant = self._retrieve_variant_data( - partition.get('defaults'), endpoint_variant_tags) + partition.get('defaults'), endpoint_variant_tags + ) if variant and 'dnsSuffix' in variant: return variant['dnsSuffix'] else: @@ -163,7 +168,7 @@ class EndpointResolver(BaseEndpointResolver): region_name=None, partition_name=None, use_dualstack_endpoint=False, - use_fips_endpoint=False + use_fips_endpoint=False, ): if ( service_name == 's3' @@ -180,9 +185,12 @@ class EndpointResolver(BaseEndpointResolver): if valid_partition is not None: result = self._endpoint_for_partition( - valid_partition, service_name, - region_name, use_dualstack_endpoint, use_fips_endpoint, - True + valid_partition, + service_name, + region_name, + use_dualstack_endpoint, + use_fips_endpoint, + True, ) return result return None @@ -190,12 +198,16 @@ class EndpointResolver(BaseEndpointResolver): # Iterate over each partition until a match is found. for partition in self._endpoint_data['partitions']: if use_dualstack_endpoint and ( - partition['partition'] in - self._UNSUPPORTED_DUALSTACK_PARTITIONS): + partition['partition'] + in self._UNSUPPORTED_DUALSTACK_PARTITIONS + ): continue result = self._endpoint_for_partition( - partition, service_name, region_name, use_dualstack_endpoint, - use_fips_endpoint + partition, + service_name, + region_name, + use_dualstack_endpoint, + use_fips_endpoint, ) if result: return result @@ -206,22 +218,33 @@ class EndpointResolver(BaseEndpointResolver): return partition['partition'] raise UnknownRegionError( region_name=region_name, - error_msg='No partition found for provided region_name.' 
+ error_msg='No partition found for provided region_name.', ) - def _endpoint_for_partition(self, partition, service_name, region_name, - use_dualstack_endpoint, use_fips_endpoint, - force_partition=False): + def _endpoint_for_partition( + self, + partition, + service_name, + region_name, + use_dualstack_endpoint, + use_fips_endpoint, + force_partition=False, + ): partition_name = partition["partition"] - if (use_dualstack_endpoint and - partition_name in self._UNSUPPORTED_DUALSTACK_PARTITIONS): - error_msg = ("Dualstack endpoints are currently not supported" - " for %s partition" % partition_name) + if ( + use_dualstack_endpoint + and partition_name in self._UNSUPPORTED_DUALSTACK_PARTITIONS + ): + error_msg = ( + "Dualstack endpoints are currently not supported" + " for %s partition" % partition_name + ) raise EndpointVariantError(tags=['dualstack'], error_msg=error_msg) # Get the service from the partition, or an empty template. service_data = partition['services'].get( - service_name, DEFAULT_SERVICE_DATA) + service_name, DEFAULT_SERVICE_DATA + ) # Use the partition endpoint if no region is supplied. if region_name is None: if 'partitionEndpoint' in service_data: @@ -248,12 +271,19 @@ class EndpointResolver(BaseEndpointResolver): partition_endpoint = service_data.get('partitionEndpoint') is_regionalized = service_data.get('isRegionalized', True) if partition_endpoint and not is_regionalized: - LOG.debug('Using partition endpoint for %s, %s: %s', - service_name, region_name, partition_endpoint) + LOG.debug( + 'Using partition endpoint for %s, %s: %s', + service_name, + region_name, + partition_endpoint, + ) resolve_kwargs['endpoint_name'] = partition_endpoint return self._resolve(**resolve_kwargs) - LOG.debug('Creating a regex based endpoint for %s, %s', - service_name, region_name) + LOG.debug( + 'Creating a regex based endpoint for %s, %s', + service_name, + region_name, + ) return self._resolve(**resolve_kwargs) def _region_match(self, partition, region_name): @@ -278,40 +308,48 @@ class EndpointResolver(BaseEndpointResolver): tags.append('fips') return tags - def _resolve_variant(self, tags, endpoint_data, service_defaults, - partition_defaults): + def _resolve_variant( + self, tags, endpoint_data, service_defaults, partition_defaults + ): result = {} - for variants in [endpoint_data, service_defaults, - partition_defaults]: + for variants in [endpoint_data, service_defaults, partition_defaults]: variant = self._retrieve_variant_data(variants, tags) if variant: self._merge_keys(variant, result) return result - def _resolve(self, partition, service_name, service_data, endpoint_name, - use_dualstack_endpoint, use_fips_endpoint): - endpoint_data = service_data.get('endpoints', {}).get(endpoint_name, {}) + def _resolve( + self, + partition, + service_name, + service_data, + endpoint_name, + use_dualstack_endpoint, + use_fips_endpoint, + ): + endpoint_data = service_data.get('endpoints', {}).get( + endpoint_name, {} + ) if endpoint_data.get('deprecated'): LOG.warning( - 'Client is configured with the deprecated endpoint: %s' % ( - endpoint_name - ) + 'Client is configured with the deprecated endpoint: %s' + % (endpoint_name) ) service_defaults = service_data.get('defaults', {}) partition_defaults = partition.get('defaults', {}) - tags = self._create_tag_list(use_dualstack_endpoint, - use_fips_endpoint) + tags = self._create_tag_list(use_dualstack_endpoint, use_fips_endpoint) if tags: - result = self._resolve_variant(tags, endpoint_data, - service_defaults, - partition_defaults) + result = 
self._resolve_variant( + tags, endpoint_data, service_defaults, partition_defaults + ) if result == {}: - error_msg = ("Endpoint does not exist for %s in region %s" % ( - service_name, endpoint_name - )) + error_msg = ( + f"Endpoint does not exist for {service_name} " + f"in region {endpoint_name}" + ) raise EndpointVariantError(tags=tags, error_msg=error_msg) self._merge_keys(endpoint_data, result) else: @@ -329,13 +367,20 @@ class EndpointResolver(BaseEndpointResolver): self._merge_keys(partition_defaults, result) result['hostname'] = self._expand_template( - partition, result['hostname'], service_name, endpoint_name, - result['dnsSuffix'] + partition, + result['hostname'], + service_name, + endpoint_name, + result['dnsSuffix'], ) if 'sslCommonName' in result: result['sslCommonName'] = self._expand_template( - partition, result['sslCommonName'], service_name, - endpoint_name, result['dnsSuffix']) + partition, + result['sslCommonName'], + service_name, + endpoint_name, + result['dnsSuffix'], + ) return result @@ -344,8 +389,9 @@ class EndpointResolver(BaseEndpointResolver): if key not in result: result[key] = from_data[key] - def _expand_template(self, partition, template, service_name, - endpoint_name, dnsSuffix): + def _expand_template( + self, partition, template, service_name, endpoint_name, dnsSuffix + ): return template.format( - service=service_name, region=endpoint_name, - dnsSuffix=dnsSuffix) + service=service_name, region=endpoint_name, dnsSuffix=dnsSuffix + ) diff --git a/contrib/python/botocore/py3/botocore/response.py b/contrib/python/botocore/py3/botocore/response.py index b74805cbe2..9f7c2eb692 100644 --- a/contrib/python/botocore/py3/botocore/response.py +++ b/contrib/python/botocore/py3/botocore/response.py @@ -27,9 +27,9 @@ from botocore.exceptions import ( ) # Keep these imported. There's pre-existing code that uses them. -from botocore import ScalarTypes # noqa -from botocore.compat import XMLParseError # noqa -from botocore.hooks import first_non_none_response # noqa +from botocore import ScalarTypes # noqa +from botocore.compat import XMLParseError # noqa +from botocore.hooks import first_non_none_response # noqa logger = logging.getLogger(__name__) @@ -47,6 +47,7 @@ class StreamingBody(IOBase): is raised. """ + _DEFAULT_CHUNK_SIZE = 1024 def __init__(self, raw_stream, content_length): @@ -73,14 +74,14 @@ class StreamingBody(IOBase): # putting in a check here so in case this interface goes away, we'll # know. try: - # To further complicate things, the way to grab the - # underlying socket object from an HTTPResponse is different - # in py2 and py3. So this code has been pushed to botocore.compat. set_socket_timeout(self._raw_stream, timeout) except AttributeError: - logger.error("Cannot access the socket object of " - "a streaming response. It's possible " - "the interface has changed.", exc_info=True) + logger.error( + "Cannot access the socket object of " + "a streaming response. It's possible " + "the interface has changed.", + exc_info=True, + ) raise def readable(self): @@ -113,13 +114,11 @@ class StreamingBody(IOBase): return self._raw_stream.readlines() def __iter__(self): - """Return an iterator to yield 1k chunks from the raw stream. - """ + """Return an iterator to yield 1k chunks from the raw stream.""" return self.iter_chunks(self._DEFAULT_CHUNK_SIZE) def __next__(self): - """Return the next 1k chunk from the raw stream. 
- """ + """Return the next 1k chunk from the raw stream.""" current_chunk = self.read(self._DEFAULT_CHUNK_SIZE) if current_chunk: return current_chunk @@ -156,11 +155,13 @@ class StreamingBody(IOBase): # See: https://github.com/kennethreitz/requests/issues/1855 # Basically, our http library doesn't do this for us, so we have # to do this ourself. - if self._content_length is not None and \ - self._amount_read != int(self._content_length): + if self._content_length is not None and self._amount_read != int( + self._content_length + ): raise IncompleteReadError( actual_bytes=self._amount_read, - expected_bytes=int(self._content_length)) + expected_bytes=int(self._content_length), + ) def tell(self): return self._raw_stream.tell() @@ -183,10 +184,12 @@ def get_response(operation_model, http_response): response_dict['body'] = http_response.content elif operation_model.has_streaming_output: response_dict['body'] = StreamingBody( - http_response.raw, response_dict['headers'].get('content-length')) + http_response.raw, response_dict['headers'].get('content-length') + ) else: response_dict['body'] = http_response.content parser = parsers.create_parser(protocol) - return http_response, parser.parse(response_dict, - operation_model.output_shape) + return http_response, parser.parse( + response_dict, operation_model.output_shape + ) diff --git a/contrib/python/botocore/py3/botocore/retries/adaptive.py b/contrib/python/botocore/py3/botocore/retries/adaptive.py index 67ab5e1ef9..a7c1fda4d9 100644 --- a/contrib/python/botocore/py3/botocore/retries/adaptive.py +++ b/contrib/python/botocore/py3/botocore/retries/adaptive.py @@ -9,8 +9,9 @@ logger = logging.getLogger(__name__) def register_retry_handler(client): clock = bucket.Clock() - rate_adjustor = throttling.CubicCalculator(starting_max_rate=0, - start_time=clock.current_time()) + rate_adjustor = throttling.CubicCalculator( + starting_max_rate=0, start_time=clock.current_time() + ) token_bucket = bucket.TokenBucket(max_rate=1, clock=clock) rate_clocker = RateClocker(clock) throttling_detector = standard.ThrottlingErrorDetector( @@ -24,20 +25,28 @@ def register_retry_handler(client): clock=clock, ) client.meta.events.register( - 'before-send', limiter.on_sending_request, + 'before-send', + limiter.on_sending_request, ) client.meta.events.register( - 'needs-retry', limiter.on_receiving_response, + 'needs-retry', + limiter.on_receiving_response, ) return limiter -class ClientRateLimiter(object): +class ClientRateLimiter: _MAX_RATE_ADJUST_SCALE = 2.0 - def __init__(self, rate_adjustor, rate_clocker, token_bucket, - throttling_detector, clock): + def __init__( + self, + rate_adjustor, + rate_clocker, + token_bucket, + throttling_detector, + clock, + ): self._rate_adjustor = rate_adjustor self._rate_clocker = rate_clocker self._token_bucket = token_bucket @@ -61,27 +70,39 @@ class ClientRateLimiter(object): if not self._enabled: rate_to_use = measured_rate else: - rate_to_use = min(measured_rate, self._token_bucket.max_rate) + rate_to_use = min( + measured_rate, self._token_bucket.max_rate + ) new_rate = self._rate_adjustor.error_received( - rate_to_use, timestamp) - logger.debug("Throttling response received, new send rate: %s " - "measured rate: %s, token bucket capacity " - "available: %s", new_rate, measured_rate, - self._token_bucket.available_capacity) + rate_to_use, timestamp + ) + logger.debug( + "Throttling response received, new send rate: %s " + "measured rate: %s, token bucket capacity " + "available: %s", + new_rate, + measured_rate, + 
self._token_bucket.available_capacity, + ) self._enabled = True self._token_bucket.max_rate = min( - new_rate, self._MAX_RATE_ADJUST_SCALE * measured_rate) + new_rate, self._MAX_RATE_ADJUST_SCALE * measured_rate + ) -class RateClocker(object): +class RateClocker: """Tracks the rate at which a client is sending a request.""" _DEFAULT_SMOOTHING = 0.8 # Update the rate every _TIME_BUCKET_RANGE seconds. _TIME_BUCKET_RANGE = 0.5 - def __init__(self, clock, smoothing=_DEFAULT_SMOOTHING, - time_bucket_range=_TIME_BUCKET_RANGE): + def __init__( + self, + clock, + smoothing=_DEFAULT_SMOOTHING, + time_bucket_range=_TIME_BUCKET_RANGE, + ): self._clock = clock self._measured_rate = 0 self._smoothing = smoothing @@ -93,15 +114,15 @@ class RateClocker(object): def record(self, amount=1): with self._lock: t = self._clock.current_time() - bucket = math.floor( - t * self._time_bucket_scale) / self._time_bucket_scale + bucket = ( + math.floor(t * self._time_bucket_scale) + / self._time_bucket_scale + ) self._count += amount if bucket > self._last_bucket: - current_rate = self._count / float( - bucket - self._last_bucket) - self._measured_rate = ( - (current_rate * self._smoothing) + - (self._measured_rate * (1 - self._smoothing)) + current_rate = self._count / float(bucket - self._last_bucket) + self._measured_rate = (current_rate * self._smoothing) + ( + self._measured_rate * (1 - self._smoothing) ) self._count = 0 self._last_bucket = bucket diff --git a/contrib/python/botocore/py3/botocore/retries/base.py b/contrib/python/botocore/py3/botocore/retries/base.py index 008e85e421..108bfed690 100644 --- a/contrib/python/botocore/py3/botocore/retries/base.py +++ b/contrib/python/botocore/py3/botocore/retries/base.py @@ -1,5 +1,4 @@ -class BaseRetryBackoff(object): - +class BaseRetryBackoff: def delay_amount(self, context): """Calculate how long we should delay before retrying. @@ -9,7 +8,7 @@ class BaseRetryBackoff(object): raise NotImplementedError("delay_amount") -class BaseRetryableChecker(object): +class BaseRetryableChecker: """Base class for determining if a retry should happen. This base class checks for specific retryable conditions. diff --git a/contrib/python/botocore/py3/botocore/retries/bucket.py b/contrib/python/botocore/py3/botocore/retries/bucket.py index 99ae5879b6..9637391eca 100644 --- a/contrib/python/botocore/py3/botocore/retries/bucket.py +++ b/contrib/python/botocore/py3/botocore/retries/bucket.py @@ -5,7 +5,7 @@ import time from botocore.exceptions import CapacityNotAvailableError -class Clock(object): +class Clock: def __init__(self): pass @@ -16,7 +16,7 @@ class Clock(object): return time.time() -class TokenBucket(object): +class TokenBucket: _MIN_RATE = 0.5 @@ -52,8 +52,7 @@ class TokenBucket(object): # If we're scaling down, we also can't have a capacity that's # more than our max_capacity. 
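For reference, the bucket being resized here can be exercised on its own. A sketch under the interfaces visible in these hunks; the block keyword and the bucket starting without capacity are assumptions drawn from the surrounding code, not verified behavior:

    from botocore.exceptions import CapacityNotAvailableError
    from botocore.retries import bucket

    clock = bucket.Clock()
    token_bucket = bucket.TokenBucket(max_rate=2, clock=clock)
    try:
        token_bucket.acquire(amount=1, block=False)
        print('capacity available; request may be sent now')
    except CapacityNotAvailableError:
        # capacity refills at max_rate tokens per second; a blocking
        # acquire() would wait for it instead of raising
        print('no capacity yet; the client would delay this send')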
self._current_capacity = min( - self._current_capacity, - self._max_capacity + self._current_capacity, self._max_capacity ) self._new_fill_rate_condition.notify() diff --git a/contrib/python/botocore/py3/botocore/retries/quota.py b/contrib/python/botocore/py3/botocore/retries/quota.py index cc0b3d5d5e..c3e91ae367 100644 --- a/contrib/python/botocore/py3/botocore/retries/quota.py +++ b/contrib/python/botocore/py3/botocore/retries/quota.py @@ -5,7 +5,7 @@ import threading -class RetryQuota(object): +class RetryQuota: INITIAL_CAPACITY = 500 def __init__(self, initial_capacity=INITIAL_CAPACITY, lock=None): @@ -47,8 +47,7 @@ class RetryQuota(object): return with self._lock: amount = min( - self._max_capacity - self._available_capacity, - capacity_amount + self._max_capacity - self._available_capacity, capacity_amount ) self._available_capacity += amount diff --git a/contrib/python/botocore/py3/botocore/retries/special.py b/contrib/python/botocore/py3/botocore/retries/special.py index 1928780d49..c14a089b33 100644 --- a/contrib/python/botocore/py3/botocore/retries/special.py +++ b/contrib/python/botocore/py3/botocore/retries/special.py @@ -41,8 +41,12 @@ class RetryDDBChecksumError(BaseRetryableChecker): checksum = context.http_response.headers.get(self._CHECKSUM_HEADER) if checksum is None: return False - actual_crc32 = crc32(context.http_response.content) & 0xffffffff + actual_crc32 = crc32(context.http_response.content) & 0xFFFFFFFF if actual_crc32 != int(checksum): - logger.debug("DynamoDB crc32 checksum does not match, " - "expected: %s, actual: %s", checksum, actual_crc32) + logger.debug( + "DynamoDB crc32 checksum does not match, " + "expected: %s, actual: %s", + checksum, + actual_crc32, + ) return True diff --git a/contrib/python/botocore/py3/botocore/retries/standard.py b/contrib/python/botocore/py3/botocore/retries/standard.py index e6a4a81a9b..1f73db0cf8 100644 --- a/contrib/python/botocore/py3/botocore/retries/standard.py +++ b/contrib/python/botocore/py3/botocore/retries/standard.py @@ -44,8 +44,9 @@ def register_retry_handler(client, max_attempts=DEFAULT_MAX_ATTEMPTS): service_id = client.meta.service_model.service_id service_event_name = service_id.hyphenize() - client.meta.events.register('after-call.%s' % service_event_name, - retry_quota.release_retry_quota) + client.meta.events.register( + f'after-call.{service_event_name}', retry_quota.release_retry_quota + ) handler = RetryHandler( retry_policy=RetryPolicy( @@ -58,18 +59,20 @@ def register_retry_handler(client, max_attempts=DEFAULT_MAX_ATTEMPTS): unique_id = 'retry-config-%s' % service_event_name client.meta.events.register( - 'needs-retry.%s' % service_event_name, handler.needs_retry, - unique_id=unique_id + 'needs-retry.%s' % service_event_name, + handler.needs_retry, + unique_id=unique_id, ) return handler -class RetryHandler(object): +class RetryHandler: """Bridge between botocore's event system and this module. This class is intended to be hooked to botocore's event system as an event handler. """ + def __init__(self, retry_policy, retry_event_adapter, retry_quota): self._retry_policy = retry_policy self._retry_event_adapter = retry_event_adapter @@ -84,19 +87,22 @@ class RetryHandler(object): # capacity in our retry quota. 
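Aside: a minimal sketch of the RetryQuota semantics reformatted above; the min() clamp in release() is the same one the quota.py hunk shows, so refunds can never push capacity past the maximum. Class and method names here are illustrative, not botocore's API:

    import threading

    class MiniRetryQuota:
        def __init__(self, capacity=500):  # INITIAL_CAPACITY in quota.py
            self._max = capacity
            self._available = capacity
            self._lock = threading.Lock()

        def acquire(self, amount):
            with self._lock:
                if self._available < amount:
                    return False
                self._available -= amount
                return True

        def release(self, amount):
            with self._lock:
                # Same clamp as quota.py: never exceed max capacity.
                self._available += min(self._max - self._available, amount)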
if self._retry_quota.acquire_retry_quota(context): retry_delay = self._retry_policy.compute_retry_delay(context) - logger.debug("Retry needed, retrying request after " - "delay of: %s", retry_delay) + logger.debug( + "Retry needed, retrying request after delay of: %s", + retry_delay, + ) else: - logger.debug("Retry needed but retry quota reached, " - "not retrying request.") + logger.debug( + "Retry needed but retry quota reached, " + "not retrying request." + ) else: logger.debug("Not retrying request.") - self._retry_event_adapter.adapt_retry_response_from_context( - context) + self._retry_event_adapter.adapt_retry_response_from_context(context) return retry_delay -class RetryEventAdapter(object): +class RetryEventAdapter: """Adapter to existing retry interface used in the endpoints layer. This existing interface for determining if a retry needs to happen @@ -106,6 +112,7 @@ class RetryEventAdapter(object): new retry strategies. """ + def create_retry_context(self, **kwargs): """Create context based on needs-retry kwargs.""" response = kwargs['response'] @@ -138,14 +145,15 @@ class RetryEventAdapter(object): # don't mutate any input parameters from the needs-retry event. metadata = context.get_retry_metadata() if context.parsed_response is not None: - context.parsed_response.setdefault( - 'ResponseMetadata', {}).update(metadata) + context.parsed_response.setdefault('ResponseMetadata', {}).update( + metadata + ) # Implementation note: this is meant to encapsulate all the misc stuff # that gets sent in the needs-retry event. This is mapped so that params # are more clear and explicit. -class RetryContext(object): +class RetryContext: """Normalize a response that we use to check if a retry should occur. This class smoothes over the different types of responses we may get @@ -168,9 +176,16 @@ class RetryContext(object): are meant to be modified directly. """ - def __init__(self, attempt_number, operation_model=None, - parsed_response=None, http_response=None, - caught_exception=None, request_context=None): + + def __init__( + self, + attempt_number, + operation_model=None, + parsed_response=None, + http_response=None, + caught_exception=None, + request_context=None, + ): # 1-based attempt number. self.attempt_number = attempt_number self.operation_model = operation_model @@ -221,7 +236,7 @@ class RetryContext(object): return self._retry_metadata.copy() -class RetryPolicy(object): +class RetryPolicy: def __init__(self, retry_checker, retry_backoff): self._retry_checker = retry_checker self._retry_backoff = retry_backoff @@ -259,7 +274,7 @@ class ExponentialBackoff(BaseRetryBackoff): # want the first delay to just be ``rand(0, 1)``. 
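Aside: the RetryDDBChecksumError check a few hunks above compares crc32(body) & 0xFFFFFFFF against a checksum response header. A self-contained sketch; the 'x-amz-crc32' header name is DynamoDB's and is stated here from general knowledge rather than from this diff:

    from binascii import crc32

    body = b'{"Count": 0, "Items": []}'
    headers = {'x-amz-crc32': str(crc32(body) & 0xFFFFFFFF)}  # what the service sends

    actual = crc32(body) & 0xFFFFFFFF
    # A mismatch here is what RetryDDBChecksumError treats as retryable.
    assert actual == int(headers['x-amz-crc32'])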
return min( self._random() * (self._base ** (context.attempt_number - 1)), - self._max_backoff + self._max_backoff, ) @@ -292,9 +307,12 @@ class TransientRetryableChecker(BaseRetryableChecker): HTTPClientError, ) - def __init__(self, transient_error_codes=None, - transient_status_codes=None, - transient_exception_cls=None): + def __init__( + self, + transient_error_codes=None, + transient_status_codes=None, + transient_exception_cls=None, + ): if transient_error_codes is None: transient_error_codes = self._TRANSIENT_ERROR_CODES[:] if transient_status_codes is None: @@ -309,12 +327,15 @@ class TransientRetryableChecker(BaseRetryableChecker): if context.get_error_code() in self._transient_error_codes: return True if context.http_response is not None: - if context.http_response.status_code in \ - self._transient_status_codes: + if ( + context.http_response.status_code + in self._transient_status_codes + ): return True if context.caught_exception is not None: - return isinstance(context.caught_exception, - self._transient_exception_cls) + return isinstance( + context.caught_exception, self._transient_exception_cls + ) return False @@ -362,8 +383,9 @@ class ModeledRetryableChecker(BaseRetryableChecker): return self._error_detector.detect_error_type(context) is not None -class ModeledRetryErrorDetector(object): +class ModeledRetryErrorDetector: """Checks whether or not an error is a modeled retryable error.""" + # There are return values from the detect_error_type() method. TRANSIENT_ERROR = 'TRANSIENT_ERROR' THROTTLING_ERROR = 'THROTTLING_ERROR' @@ -398,7 +420,7 @@ class ModeledRetryErrorDetector(object): return self.TRANSIENT_ERROR -class ThrottlingErrorDetector(object): +class ThrottlingErrorDetector: def __init__(self, retry_event_adapter): self._modeled_error_detector = ModeledRetryErrorDetector() self._fixed_error_code_detector = ThrottledRetryableChecker() @@ -426,21 +448,24 @@ class StandardRetryConditions(BaseRetryableChecker): # Note: This class is for convenience so you can have the # standard retry condition in a single class. 
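Aside: ExponentialBackoff.delay_amount() above is full-jitter backoff, rand(0, 1) * base**(attempt - 1), capped at a maximum. A worked sketch; the base of 2 and the 20 s cap are assumed defaults, only the formula itself comes from this hunk:

    import random

    def delay_amount(attempt_number, base=2, max_backoff=20.0):
        # attempt 1 -> rand(0, 1), attempt 2 -> rand(0, 2),
        # attempt 3 -> rand(0, 4), ... capped at max_backoff.
        return min(random.random() * (base ** (attempt_number - 1)), max_backoff)

    for attempt in (1, 2, 3, 4):
        print(attempt, round(delay_amount(attempt), 3))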
self._max_attempts_checker = MaxAttemptsChecker(max_attempts) - self._additional_checkers = OrRetryChecker([ - TransientRetryableChecker(), - ThrottledRetryableChecker(), - ModeledRetryableChecker(), - OrRetryChecker([ - special.RetryIDPCommunicationError(), - special.RetryDDBChecksumError(), - ]) - ]) + self._additional_checkers = OrRetryChecker( + [ + TransientRetryableChecker(), + ThrottledRetryableChecker(), + ModeledRetryableChecker(), + OrRetryChecker( + [ + special.RetryIDPCommunicationError(), + special.RetryDDBChecksumError(), + ] + ), + ] + ) def is_retryable(self, context): - return ( - self._max_attempts_checker.is_retryable(context) - and self._additional_checkers.is_retryable(context) - ) + return self._max_attempts_checker.is_retryable( + context + ) and self._additional_checkers.is_retryable(context) class OrRetryChecker(BaseRetryableChecker): @@ -451,7 +476,7 @@ class OrRetryChecker(BaseRetryableChecker): return any(checker.is_retryable(context) for checker in self._checkers) -class RetryQuotaChecker(object): +class RetryQuotaChecker: _RETRY_COST = 5 _NO_RETRY_INCREMENT = 1 _TIMEOUT_RETRY_REQUEST = 10 diff --git a/contrib/python/botocore/py3/botocore/retries/throttling.py b/contrib/python/botocore/py3/botocore/retries/throttling.py index 2143f394bd..8ad49ebe59 100644 --- a/contrib/python/botocore/py3/botocore/retries/throttling.py +++ b/contrib/python/botocore/py3/botocore/retries/throttling.py @@ -3,13 +3,17 @@ from collections import namedtuple CubicParams = namedtuple('CubicParams', ['w_max', 'k', 'last_fail']) -class CubicCalculator(object): +class CubicCalculator: _SCALE_CONSTANT = 0.4 _BETA = 0.7 - def __init__(self, starting_max_rate, - start_time, - scale_constant=_SCALE_CONSTANT, beta=_BETA): + def __init__( + self, + starting_max_rate, + start_time, + scale_constant=_SCALE_CONSTANT, + beta=_BETA, + ): self._w_max = starting_max_rate self._scale_constant = scale_constant self._beta = beta @@ -22,9 +26,7 @@ class CubicCalculator(object): def success_received(self, timestamp): dt = timestamp - self._last_fail - new_rate = ( - self._scale_constant * (dt - self._k) ** 3 + self._w_max - ) + new_rate = self._scale_constant * (dt - self._k) ** 3 + self._w_max return new_rate def error_received(self, current_rate, timestamp): @@ -48,7 +50,5 @@ class CubicCalculator(object): """ return CubicParams( - w_max=self._w_max, - k=self._k, - last_fail=self._last_fail + w_max=self._w_max, k=self._k, last_fail=self._last_fail ) diff --git a/contrib/python/botocore/py3/botocore/retryhandler.py b/contrib/python/botocore/py3/botocore/retryhandler.py index b411322759..d4b1ba56c6 100644 --- a/contrib/python/botocore/py3/botocore/retryhandler.py +++ b/contrib/python/botocore/py3/botocore/retryhandler.py @@ -32,8 +32,10 @@ logger = logging.getLogger(__name__) # this mapping with more specific exceptions. 
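Aside: CubicCalculator below grows the send rate cubically after a throttle event, rate(t) = C * (t - K)^3 + W_max with C = 0.4 and beta = 0.7. A sketch of the success path; the zero-point formula for K is an assumption taken from the CUBIC algorithm itself, not shown in this hunk:

    def cubic_success_rate(w_max, dt, scale_constant=0.4, beta=0.7):
        # Assumed zero point: K = ((W_max * (1 - beta)) / C) ** (1/3)
        k = ((w_max * (1 - beta)) / scale_constant) ** (1 / 3.0)
        return scale_constant * (dt - k) ** 3 + w_max

    # The rate recovers toward (and then past) the old maximum as time
    # since the last failure grows:
    for dt in (0.0, 1.0, 2.0, 3.0):
        print(dt, round(cubic_success_rate(10.0, dt), 3))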
EXCEPTION_MAP = { 'GENERAL_CONNECTION_ERROR': [ - ConnectionError, ConnectionClosedError, ReadTimeoutError, - EndpointConnectionError + ConnectionError, + ConnectionClosedError, + ReadTimeoutError, + EndpointConnectionError, ], } @@ -54,8 +56,9 @@ def delay_exponential(base, growth_factor, attempts): if base == 'rand': base = random.random() elif base <= 0: - raise ValueError("The 'base' param must be greater than 0, " - "got: %s" % base) + raise ValueError( + f"The 'base' param must be greater than 0, got: {base}" + ) time_to_sleep = base * (growth_factor ** (attempts - 1)) return time_to_sleep @@ -68,14 +71,17 @@ def create_exponential_delay_function(base, growth_factor): """ return functools.partial( - delay_exponential, base=base, growth_factor=growth_factor) + delay_exponential, base=base, growth_factor=growth_factor + ) def create_retry_handler(config, operation_name=None): checker = create_checker_from_retry_config( - config, operation_name=operation_name) + config, operation_name=operation_name + ) action = create_retry_action_from_config( - config, operation_name=operation_name) + config, operation_name=operation_name + ) return RetryHandler(checker=checker, action=action) @@ -88,7 +94,8 @@ def create_retry_action_from_config(config, operation_name=None): if delay_config['type'] == 'exponential': return create_exponential_delay_function( base=delay_config['base'], - growth_factor=delay_config['growth_factor']) + growth_factor=delay_config['growth_factor'], + ) def create_checker_from_retry_config(config, operation_name=None): @@ -109,7 +116,8 @@ def create_checker_from_retry_config(config, operation_name=None): for key in operation_policies: checkers.append(_create_single_checker(operation_policies[key])) retry_exception = _extract_retryable_exception( - operation_policies[key]) + operation_policies[key] + ) if retry_exception is not None: retryable_exceptions.extend(retry_exception) if len(checkers) == 1: @@ -118,14 +126,17 @@ def create_checker_from_retry_config(config, operation_name=None): else: multi_checker = MultiChecker(checkers) return MaxAttemptsDecorator( - multi_checker, max_attempts=max_attempts, - retryable_exceptions=tuple(retryable_exceptions)) + multi_checker, + max_attempts=max_attempts, + retryable_exceptions=tuple(retryable_exceptions), + ) def _create_single_checker(config): if 'response' in config['applies_when']: return _create_single_response_checker( - config['applies_when']['response']) + config['applies_when']['response'] + ) elif 'socket_errors' in config['applies_when']: return ExceptionRaiser() @@ -134,10 +145,12 @@ def _create_single_response_checker(response): if 'service_error_code' in response: checker = ServiceErrorCodeChecker( status_code=response['http_status_code'], - error_code=response['service_error_code']) + error_code=response['service_error_code'], + ) elif 'http_status_code' in response: checker = HTTPStatusCodeChecker( - status_code=response['http_status_code']) + status_code=response['http_status_code'] + ) elif 'crc32body' in response: checker = CRC32Checker(header=response['crc32body']) else: @@ -157,7 +170,7 @@ def _extract_retryable_exception(config): return exceptions -class RetryHandler(object): +class RetryHandler: """Retry handler. 
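Aside: the legacy delay_exponential() shown above is the pre-standard-mode backoff; the base='rand' branch is verbatim from the hunk. A worked example:

    import random

    def delay_exponential(base, growth_factor, attempts):
        if base == 'rand':
            base = random.random()
        elif base <= 0:
            raise ValueError(f"The 'base' param must be greater than 0, got: {base}")
        return base * (growth_factor ** (attempts - 1))

    print(delay_exponential(1, 2, 3))       # 4 -> sleep 4s before attempt 3
    print(delay_exponential('rand', 2, 3))  # rand(0, 1) * 4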
The retry handler takes two params, ``checker`` object @@ -185,7 +198,7 @@ class RetryHandler(object): checker_kwargs = { 'attempt_number': attempts, 'response': response, - 'caught_exception': caught_exception + 'caught_exception': caught_exception, } if isinstance(self._checker, MaxAttemptsDecorator): retries_context = kwargs['request_dict']['context'].get('retries') @@ -198,13 +211,14 @@ class RetryHandler(object): logger.debug("No retry needed.") -class BaseChecker(object): +class BaseChecker: """Base class for retry checkers. Each class is responsible for checking a single criteria that determines whether or not a retry should not happen. """ + def __call__(self, attempt_number, response, caught_exception): """Determine if retry criteria matches. @@ -231,7 +245,8 @@ class BaseChecker(object): return self._check_response(attempt_number, response) elif caught_exception is not None: return self._check_caught_exception( - attempt_number, caught_exception) + attempt_number, caught_exception + ) else: raise ValueError("Both response and caught_exception are None.") @@ -252,27 +267,34 @@ class MaxAttemptsDecorator(BaseChecker): that was previously being caught will be raised. """ + def __init__(self, checker, max_attempts, retryable_exceptions=None): self._checker = checker self._max_attempts = max_attempts self._retryable_exceptions = retryable_exceptions - def __call__(self, attempt_number, response, caught_exception, - retries_context): + def __call__( + self, attempt_number, response, caught_exception, retries_context + ): if retries_context: retries_context['max'] = max( retries_context.get('max', 0), self._max_attempts ) - should_retry = self._should_retry(attempt_number, response, - caught_exception) + should_retry = self._should_retry( + attempt_number, response, caught_exception + ) if should_retry: if attempt_number >= self._max_attempts: # explicitly set MaxAttemptsReached if response is not None and 'ResponseMetadata' in response[1]: - response[1]['ResponseMetadata']['MaxAttemptsReached'] = True - logger.debug("Reached the maximum number of retry " - "attempts: %s", attempt_number) + response[1]['ResponseMetadata'][ + 'MaxAttemptsReached' + ] = True + logger.debug( + "Reached the maximum number of retry attempts: %s", + attempt_number, + ) return False else: return should_retry @@ -280,13 +302,17 @@ class MaxAttemptsDecorator(BaseChecker): return False def _should_retry(self, attempt_number, response, caught_exception): - if self._retryable_exceptions and \ - attempt_number < self._max_attempts: + if self._retryable_exceptions and attempt_number < self._max_attempts: try: - return self._checker(attempt_number, response, caught_exception) + return self._checker( + attempt_number, response, caught_exception + ) except self._retryable_exceptions as e: - logger.debug("retry needed, retryable exception caught: %s", - e, exc_info=True) + logger.debug( + "retry needed, retryable exception caught: %s", + e, + exc_info=True, + ) return True else: # If we've exceeded the max attempts we just let the exception @@ -302,7 +328,8 @@ class HTTPStatusCodeChecker(BaseChecker): if response[0].status_code == self._status_code: logger.debug( "retry needed: retryable HTTP status code received: %s", - self._status_code) + self._status_code, + ) return True else: return False @@ -319,7 +346,10 @@ class ServiceErrorCodeChecker(BaseChecker): if actual_error_code == self._error_code: logger.debug( "retry needed: matching HTTP status and error code seen: " - "%s, %s", self._status_code, self._error_code) + 
"%s, %s", + self._status_code, + self._error_code, + ) return True return False @@ -330,8 +360,9 @@ class MultiChecker(BaseChecker): def __call__(self, attempt_number, response, caught_exception): for checker in self._checkers: - checker_response = checker(attempt_number, response, - caught_exception) + checker_response = checker( + attempt_number, response, caught_exception + ) if checker_response: return checker_response return False @@ -346,17 +377,25 @@ class CRC32Checker(BaseChecker): http_response = response[0] expected_crc = http_response.headers.get(self._header_name) if expected_crc is None: - logger.debug("crc32 check skipped, the %s header is not " - "in the http response.", self._header_name) + logger.debug( + "crc32 check skipped, the %s header is not " + "in the http response.", + self._header_name, + ) else: - actual_crc32 = crc32(response[0].content) & 0xffffffff + actual_crc32 = crc32(response[0].content) & 0xFFFFFFFF if not actual_crc32 == int(expected_crc): logger.debug( "retry needed: crc32 check failed, expected != actual: " - "%s != %s", int(expected_crc), actual_crc32) - raise ChecksumError(checksum_type='crc32', - expected_checksum=int(expected_crc), - actual_checksum=actual_crc32) + "%s != %s", + int(expected_crc), + actual_crc32, + ) + raise ChecksumError( + checksum_type='crc32', + expected_checksum=int(expected_crc), + actual_checksum=actual_crc32, + ) class ExceptionRaiser(BaseChecker): @@ -365,6 +404,7 @@ class ExceptionRaiser(BaseChecker): This class will raise any non None ``caught_exception``. """ + def _check_caught_exception(self, attempt_number, caught_exception): # This is implementation specific, but this class is useful by # coordinating with the MaxAttemptsDecorator. diff --git a/contrib/python/botocore/py3/botocore/serialize.py b/contrib/python/botocore/py3/botocore/serialize.py index f3bd723a7c..0c4b12403c 100644 --- a/contrib/python/botocore/py3/botocore/serialize.py +++ b/contrib/python/botocore/py3/botocore/serialize.py @@ -40,11 +40,12 @@ and if a str/unicode type is passed in, it will be encoded as utf-8. import base64 import calendar import datetime +import json import re from xml.etree import ElementTree from botocore import validate -from botocore.compat import formatdate, json, six +from botocore.compat import formatdate from botocore.utils import ( has_header, is_json_value_header, @@ -68,7 +69,7 @@ def create_serializer(protocol_name, include_validation=True): return serializer -class Serializer(object): +class Serializer: DEFAULT_METHOD = 'POST' # Clients can change this to a different MutableMapping # (i.e OrderedDict) if they want. This is used in the @@ -124,7 +125,7 @@ class Serializer(object): 'method': self.DEFAULT_METHOD, 'headers': {}, # An empty body is represented as an empty byte string. - 'body': b'' + 'body': b'', } return serialized @@ -150,8 +151,7 @@ class Serializer(object): timestamp_format = self.TIMESTAMP_FORMAT timestamp_format = timestamp_format.lower() datetime_obj = parse_to_aware_datetime(value) - converter = getattr( - self, '_timestamp_%s' % timestamp_format) + converter = getattr(self, f'_timestamp_{timestamp_format}') final_value = converter(datetime_obj) return final_value @@ -164,10 +164,9 @@ class Serializer(object): # Returns the base64-encoded version of value, handling # both strings and bytes. The returned value is a string # via the default encoding. 
- if isinstance(value, six.text_type): + if isinstance(value, str): value = value.encode(self.DEFAULT_ENCODING) - return base64.b64encode(value).strip().decode( - self.DEFAULT_ENCODING) + return base64.b64encode(value).strip().decode(self.DEFAULT_ENCODING) def _expand_host_prefix(self, parameters, operation_model): operation_endpoint = operation_model.endpoint @@ -177,10 +176,11 @@ class Serializer(object): host_prefix_expression = operation_endpoint['hostPrefix'] input_members = operation_model.input_shape.members host_labels = [ - member for member, shape in input_members.items() + member + for member, shape in input_members.items() if shape.serialization.get('hostLabel') ] - format_kwargs = dict((name, parameters[name]) for name in host_labels) + format_kwargs = {name: parameters[name] for name in host_labels} return host_prefix_expression.format(**format_kwargs) @@ -192,8 +192,9 @@ class QuerySerializer(Serializer): def serialize_to_request(self, parameters, operation_model): shape = operation_model.input_shape serialized = self._create_default_request() - serialized['method'] = operation_model.http.get('method', - self.DEFAULT_METHOD) + serialized['method'] = operation_model.http.get( + 'method', self.DEFAULT_METHOD + ) serialized['headers'] = { 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8' } @@ -220,8 +221,11 @@ class QuerySerializer(Serializer): # input. # prefix: The incrementally built up prefix for the serialized # key (i.e Foo.bar.members.1). - method = getattr(self, '_serialize_type_%s' % shape.type_name, - self._default_serialize) + method = getattr( + self, + f'_serialize_type_{shape.type_name}', + self._default_serialize, + ) method(serialized, value, shape, prefix=prefix) def _serialize_type_structure(self, serialized, value, shape, prefix=''): @@ -230,7 +234,7 @@ class QuerySerializer(Serializer): member_shape = members[key] member_prefix = self._get_serialized_name(member_shape, key) if prefix: - member_prefix = '%s.%s' % (prefix, member_prefix) + member_prefix = f'{prefix}.{member_prefix}' self._serialize(serialized, value, member_shape, member_prefix) def _serialize_type_list(self, serialized, value, shape, prefix=''): @@ -246,9 +250,9 @@ class QuerySerializer(Serializer): list_prefix = '.'.join(prefix.split('.')[:-1] + [name]) else: list_name = shape.member.serialization.get('name', 'member') - list_prefix = '%s.%s' % (prefix, list_name) + list_prefix = f'{prefix}.{list_name}' for i, element in enumerate(value, 1): - element_prefix = '%s.%s' % (list_prefix, i) + element_prefix = f'{list_prefix}.{i}' element_shape = shape.member self._serialize(serialized, element, element_shape, element_prefix) @@ -274,7 +278,8 @@ class QuerySerializer(Serializer): def _serialize_type_timestamp(self, serialized, value, shape, prefix=''): serialized[prefix] = self._convert_timestamp_to_str( - value, shape.serialization.get('timestampFormat')) + value, shape.serialization.get('timestampFormat') + ) def _serialize_type_boolean(self, serialized, value, shape, prefix=''): if value: @@ -314,7 +319,7 @@ class EC2Serializer(QuerySerializer): def _serialize_type_list(self, serialized, value, shape, prefix=''): for i, element in enumerate(value, 1): - element_prefix = '%s.%s' % (prefix, i) + element_prefix = f'{prefix}.{i}' element_shape = shape.member self._serialize(serialized, element, element_shape, element_prefix) @@ -323,12 +328,15 @@ class JSONSerializer(Serializer): TIMESTAMP_FORMAT = 'unixtimestamp' def serialize_to_request(self, parameters, operation_model): - 
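Aside: the prefix bookkeeping in QuerySerializer above flattens nested input into dotted form keys, with 'member' plus 1-based indices for lists. A hand-rolled sketch of that flattening, not botocore's API:

    def flatten(params, prefix='', out=None):
        out = {} if out is None else out
        if isinstance(params, dict):
            for key, value in params.items():
                flatten(value, f'{prefix}.{key}' if prefix else key, out)
        elif isinstance(params, list):
            # Lists become Name.member.1, Name.member.2, ... (1-based).
            for i, element in enumerate(params, 1):
                flatten(element, f'{prefix}.member.{i}', out)
        else:
            out[prefix] = params
        return out

    print(flatten({'Filters': [{'Name': 'tag:env', 'Values': ['prod']}]}))
    # {'Filters.member.1.Name': 'tag:env',
    #  'Filters.member.1.Values.member.1': 'prod'}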
target = '%s.%s' % (operation_model.metadata['targetPrefix'], - operation_model.name) + target = '{}.{}'.format( + operation_model.metadata['targetPrefix'], + operation_model.name, + ) json_version = operation_model.metadata['jsonVersion'] serialized = self._create_default_request() - serialized['method'] = operation_model.http.get('method', - self.DEFAULT_METHOD) + serialized['method'] = operation_model.http.get( + 'method', self.DEFAULT_METHOD + ) serialized['headers'] = { 'X-Amz-Target': target, 'Content-Type': 'application/x-amz-json-%s' % json_version, @@ -346,8 +354,11 @@ class JSONSerializer(Serializer): return serialized def _serialize(self, serialized, value, shape, key=None): - method = getattr(self, '_serialize_type_%s' % shape.type_name, - self._default_serialize) + method = getattr( + self, + '_serialize_type_%s' % shape.type_name, + self._default_serialize, + ) method(serialized, value, shape, key) def _serialize_type_structure(self, serialized, value, shape, key): @@ -368,7 +379,9 @@ class JSONSerializer(Serializer): member_shape = members[member_key] if 'name' in member_shape.serialization: member_key = member_shape.serialization['name'] - self._serialize(serialized, member_value, member_shape, member_key) + self._serialize( + serialized, member_value, member_shape, member_key + ) def _serialize_type_map(self, serialized, value, shape, key): map_obj = self.MAP_TYPE() @@ -393,7 +406,8 @@ class JSONSerializer(Serializer): def _serialize_type_timestamp(self, serialized, value, shape, key): serialized[key] = self._convert_timestamp_to_str( - value, shape.serialization.get('timestampFormat')) + value, shape.serialization.get('timestampFormat') + ) def _serialize_type_blob(self, serialized, value, shape, key): serialized[key] = self._get_base64(value) @@ -409,6 +423,7 @@ class BaseRestSerializer(Serializer): Subclasses must implement the ``_serialize_body_params`` method. """ + QUERY_STRING_TIMESTAMP_FORMAT = 'iso8601' HEADER_TIMESTAMP_FORMAT = 'rfc822' # This is a list of known values for the "location" key in the @@ -418,8 +433,9 @@ class BaseRestSerializer(Serializer): def serialize_to_request(self, parameters, operation_model): serialized = self._create_default_request() - serialized['method'] = operation_model.http.get('method', - self.DEFAULT_METHOD) + serialized['method'] = operation_model.http.get( + 'method', self.DEFAULT_METHOD + ) shape = operation_model.input_shape if shape is None: serialized['url_path'] = operation_model.http['requestUri'] @@ -445,19 +461,21 @@ class BaseRestSerializer(Serializer): if param_value is None: # Don't serialize any parameter with a None value. continue - self._partition_parameters(partitioned, param_name, param_value, - shape_members) + self._partition_parameters( + partitioned, param_name, param_value, shape_members + ) serialized['url_path'] = self._render_uri_template( - operation_model.http['requestUri'], - partitioned['uri_path_kwargs']) + operation_model.http['requestUri'], partitioned['uri_path_kwargs'] + ) # Note that we lean on the http implementation to handle the case # where the requestUri path already has query parameters. # The bundled http client, requests, already supports this. 
serialized['query_string'] = partitioned['query_string_kwargs'] if partitioned['headers']: serialized['headers'] = partitioned['headers'] - self._serialize_payload(partitioned, parameters, - serialized, shape, shape_members) + self._serialize_payload( + partitioned, parameters, serialized, shape, shape_members + ) self._serialize_content_type(serialized, shape, shape_members) host_prefix = self._expand_host_prefix(parameters, operation_model) @@ -477,14 +495,17 @@ class BaseRestSerializer(Serializer): for template_param in re.findall(r'{(.*?)}', uri_template): if template_param.endswith('+'): encoded_params[template_param] = percent_encode( - params[template_param[:-1]], safe='/~') + params[template_param[:-1]], safe='/~' + ) else: encoded_params[template_param] = percent_encode( - params[template_param]) + params[template_param] + ) return uri_template.format(**encoded_params) - def _serialize_payload(self, partitioned, parameters, - serialized, shape, shape_members): + def _serialize_payload( + self, partitioned, parameters, serialized, shape, shape_members + ): # partitioned - The user input params partitioned by location. # parameters - The user input params. # serialized - The final serialized request dict. @@ -503,13 +524,14 @@ class BaseRestSerializer(Serializer): body_params = parameters.get(payload_member) if body_params is not None: serialized['body'] = self._serialize_body_params( - body_params, - shape_members[payload_member]) + body_params, shape_members[payload_member] + ) else: serialized['body'] = self._serialize_empty_body() elif partitioned['body_kwargs']: serialized['body'] = self._serialize_body_params( - partitioned['body_kwargs'], shape) + partitioned['body_kwargs'], shape + ) elif self._requires_empty_body(shape): serialized['body'] = self._serialize_empty_body() @@ -533,18 +555,19 @@ class BaseRestSerializer(Serializer): def _has_streaming_payload(self, payload, shape_members): """Determine if payload is streaming (a blob or string).""" - return ( - payload is not None and - shape_members[payload].type_name in ['blob', 'string'] + return payload is not None and shape_members[payload].type_name in ( + 'blob', + 'string', ) def _encode_payload(self, body): - if isinstance(body, six.text_type): + if isinstance(body, str): return body.encode(self.DEFAULT_ENCODING) return body - def _partition_parameters(self, partitioned, param_name, - param_value, shape_members): + def _partition_parameters( + self, partitioned, param_name, param_value, shape_members + ): # This takes the user provided input parameter (``param``) # and figures out where they go in the request dict. # Some params are HTTP headers, some are used in the URI, some @@ -562,7 +585,8 @@ class BaseRestSerializer(Serializer): partitioned['query_string_kwargs'][key_name] = bool_str elif member.type_name == 'timestamp': timestamp_format = member.serialization.get( - 'timestampFormat', self.QUERY_STRING_TIMESTAMP_FORMAT) + 'timestampFormat', self.QUERY_STRING_TIMESTAMP_FORMAT + ) timestamp = self._convert_timestamp_to_str( param_value, timestamp_format ) @@ -584,9 +608,9 @@ class BaseRestSerializer(Serializer): # creating multiple header key/val pairs. The key # name to use for each header is the header_prefix (``key_name``) # plus the key provided by the user. 
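Aside: _render_uri_template() above percent-encodes every {label}, and greedy {label+} parameters keep '/' and '~' unescaped (e.g. S3 keys). A sketch using urllib.parse.quote in place of botocore's percent_encode helper:

    import re
    from urllib.parse import quote

    def render_uri_template(uri_template, params):
        encoded = {}
        for name in re.findall(r'{(.*?)}', uri_template):
            if name.endswith('+'):
                # Greedy labels: leave '/' and '~' alone.
                encoded[name] = quote(params[name[:-1]], safe='/~')
            else:
                encoded[name] = quote(params[name], safe='')
        return uri_template.format(**encoded)

    print(render_uri_template('/{Bucket}/{Key+}', {'Bucket': 'b', 'Key': 'a/b c'}))
    # /b/a/b%20c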
- self._do_serialize_header_map(header_prefix, - partitioned['headers'], - param_value) + self._do_serialize_header_map( + header_prefix, partitioned['headers'], param_value + ) else: partitioned['body_kwargs'][param_name] = param_value @@ -603,12 +627,14 @@ class BaseRestSerializer(Serializer): datetime_obj = parse_to_aware_datetime(value) timestamp = calendar.timegm(datetime_obj.utctimetuple()) timestamp_format = shape.serialization.get( - 'timestampFormat', self.HEADER_TIMESTAMP_FORMAT) + 'timestampFormat', self.HEADER_TIMESTAMP_FORMAT + ) return self._convert_timestamp_to_str(timestamp, timestamp_format) elif shape.type_name == 'list': converted_value = [ self._convert_header_value(shape.member, v) - for v in value if v is not None + for v in value + if v is not None ] return ",".join(converted_value) elif is_json_value_header(shape): @@ -620,7 +646,6 @@ class BaseRestSerializer(Serializer): class RestJSONSerializer(BaseRestSerializer, JSONSerializer): - def _serialize_empty_body(self): return b'{}' @@ -663,8 +688,11 @@ class RestXMLSerializer(BaseRestSerializer): return ElementTree.tostring(real_root, encoding=self.DEFAULT_ENCODING) def _serialize(self, shape, params, xmlnode, name): - method = getattr(self, '_serialize_type_%s' % shape.type_name, - self._default_serialize) + method = getattr( + self, + '_serialize_type_%s' % shape.type_name, + self._default_serialize, + ) method(xmlnode, params, shape, name) def _serialize_type_structure(self, xmlnode, params, shape, name): @@ -718,8 +746,9 @@ class RestXMLSerializer(BaseRestSerializer): for key, value in params.items(): entry_node = ElementTree.SubElement(node, 'entry') key_name = self._get_serialized_name(shape.key, default_name='key') - val_name = self._get_serialized_name(shape.value, - default_name='value') + val_name = self._get_serialized_name( + shape.value, default_name='value' + ) self._serialize(shape.key, key, entry_node, key_name) self._serialize(shape.value, value, entry_node, val_name) @@ -741,11 +770,12 @@ class RestXMLSerializer(BaseRestSerializer): def _serialize_type_timestamp(self, xmlnode, params, shape, name): node = ElementTree.SubElement(xmlnode, name) node.text = self._convert_timestamp_to_str( - params, shape.serialization.get('timestampFormat')) + params, shape.serialization.get('timestampFormat') + ) def _default_serialize(self, xmlnode, params, shape, name): node = ElementTree.SubElement(xmlnode, name) - node.text = six.text_type(params) + node.text = str(params) SERIALIZERS = { diff --git a/contrib/python/botocore/py3/botocore/session.py b/contrib/python/botocore/py3/botocore/session.py index 7abc7aeb5b..729c2b0e94 100644 --- a/contrib/python/botocore/py3/botocore/session.py +++ b/contrib/python/botocore/py3/botocore/session.py @@ -71,7 +71,7 @@ from botocore.utils import ( logger = logging.getLogger(__name__) -class Session(object): +class Session: """ The Session object collects together useful functionality from `botocore` as well as important data such as configuration @@ -87,8 +87,13 @@ class Session(object): #: The default format string to use when configuring the botocore logger. LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' - def __init__(self, session_vars=None, event_hooks=None, - include_builtin_handlers=True, profile=None): + def __init__( + self, + session_vars=None, + event_hooks=None, + include_builtin_handlers=True, + profile=None, + ): """ Create a new Session object. 
@@ -161,7 +166,8 @@ class Session(object): def _register_credential_provider(self): self._components.lazy_register_component( - 'credential_provider', self._create_credential_resolver) + 'credential_provider', self._create_credential_resolver + ) def _create_credential_resolver(self): return botocore.credentials.create_credential_resolver( @@ -171,41 +177,52 @@ class Session(object): def _register_data_loader(self): self._components.lazy_register_component( 'data_loader', - lambda: create_loader(self.get_config_variable('data_path'))) + lambda: create_loader(self.get_config_variable('data_path')), + ) def _register_endpoint_resolver(self): def create_default_resolver(): loader = self.get_component('data_loader') endpoints = loader.load_data('endpoints') return EndpointResolver(endpoints) + self._internal_components.lazy_register_component( - 'endpoint_resolver', create_default_resolver) + 'endpoint_resolver', create_default_resolver + ) def _register_default_config_resolver(self): def create_default_config_resolver(): loader = self.get_component('data_loader') defaults = loader.load_data('sdk-default-configuration') return DefaultConfigResolver(defaults) + self._internal_components.lazy_register_component( - 'default_config_resolver', create_default_config_resolver) + 'default_config_resolver', create_default_config_resolver + ) def _register_smart_defaults_factory(self): def create_smart_defaults_factory(): default_config_resolver = self._get_internal_component( - 'default_config_resolver') + 'default_config_resolver' + ) imds_region_provider = IMDSRegionProvider(session=self) return SmartDefaultsConfigStoreFactory( - default_config_resolver, imds_region_provider) + default_config_resolver, imds_region_provider + ) + self._internal_components.lazy_register_component( - 'smart_defaults_factory', create_smart_defaults_factory) + 'smart_defaults_factory', create_smart_defaults_factory + ) def _register_response_parser_factory(self): - self._components.register_component('response_parser_factory', - ResponseParserFactory()) + self._components.register_component( + 'response_parser_factory', ResponseParserFactory() + ) def _register_exceptions_factory(self): self._internal_components.register_component( - 'exceptions_factory', ClientExceptionsFactory()) + 'exceptions_factory', ClientExceptionsFactory() + ) def _register_builtin_handlers(self, events): for spec in handlers.BUILTIN_HANDLERS: @@ -223,12 +240,14 @@ class Session(object): config_store_component = ConfigValueStore( mapping=create_botocore_default_config_mapping(self) ) - self._components.register_component('config_store', - config_store_component) + self._components.register_component( + 'config_store', config_store_component + ) def _register_monitor(self): self._internal_components.lazy_register_component( - 'monitor', self._create_csm_monitor) + 'monitor', self._create_csm_monitor + ) def _create_csm_monitor(self): if self.get_config_variable('csm_enabled'): @@ -242,8 +261,9 @@ class Session(object): host=host, port=port, serializer=monitoring.CSMSerializer( - csm_client_id=client_id) - ) + csm_client_id=client_id + ), + ), ) return handler return None @@ -251,6 +271,7 @@ class Session(object): def _get_crt_version(self): try: import awscrt + return awscrt.__version__ except AttributeError: return "Unknown" @@ -277,9 +298,11 @@ class Session(object): def get_config_variable(self, logical_name, methods=None): if methods is not None: return self._get_config_variable_with_custom_methods( - logical_name, methods) + logical_name, 
methods + ) return self.get_component('config_store').get_config_variable( - logical_name) + logical_name + ) def _get_config_variable_with_custom_methods(self, logical_name, methods): # If a custom list of methods was supplied we need to perserve the @@ -305,9 +328,7 @@ class Session(object): mapping[name] = chain_builder.create_config_chain( **build_chain_config_args ) - config_store_component = ConfigValueStore( - mapping=mapping - ) + config_store_component = ConfigValueStore(mapping=mapping) value = config_store_component.get_config_variable(logical_name) return value @@ -406,7 +427,8 @@ class Session(object): # profile. cred_file = self.get_config_variable('credentials_file') cred_profiles = botocore.configloader.raw_config_parse( - cred_file) + cred_file + ) for profile in cred_profiles: cred_vars = cred_profiles[profile] if profile not in self._config['profiles']: @@ -454,9 +476,9 @@ class Session(object): :param token: An option session token used by STS session credentials. """ - self._credentials = botocore.credentials.Credentials(access_key, - secret_key, - token) + self._credentials = botocore.credentials.Credentials( + access_key, secret_key, token + ) def get_credentials(self): """ @@ -469,7 +491,8 @@ class Session(object): """ if self._credentials is None: self._credentials = self._components.get_component( - 'credential_provider').load_credentials() + 'credential_provider' + ).load_credentials() return self._credentials def user_agent(self): @@ -495,11 +518,11 @@ class Session(object): appended to the end of the user agent string. """ - base = '%s/%s Python/%s %s/%s' % (self.user_agent_name, - self.user_agent_version, - platform.python_version(), - platform.system(), - platform.release()) + base = ( + f'{self.user_agent_name}/{self.user_agent_version} ' + f'Python/{platform.python_version()} ' + f'{platform.system()}/{platform.release()}' + ) if HAS_CRT: base += ' awscrt/%s' % self._get_crt_version() if os.environ.get('AWS_EXECUTION_ENV') is not None: @@ -538,13 +561,15 @@ class Session(object): def get_waiter_model(self, service_name, api_version=None): loader = self.get_component('data_loader') waiter_config = loader.load_service_model( - service_name, 'waiters-2', api_version) + service_name, 'waiters-2', api_version + ) return waiter.WaiterModel(waiter_config) def get_paginator_model(self, service_name, api_version=None): loader = self.get_component('data_loader') paginator_config = loader.load_service_model( - service_name, 'paginators-1', api_version) + service_name, 'paginators-1', api_version + ) return paginate.PaginatorModel(paginator_config) def get_service_data(self, service_name, api_version=None): @@ -553,22 +578,24 @@ class Session(object): """ data_path = service_name service_data = self.get_component('data_loader').load_service_model( - data_path, - type_name='service-2', - api_version=api_version + data_path, type_name='service-2', api_version=api_version ) service_id = EVENT_ALIASES.get(service_name, service_name) - self._events.emit('service-data-loaded.%s' % service_id, - service_data=service_data, - service_name=service_name, session=self) + self._events.emit( + 'service-data-loaded.%s' % service_id, + service_data=service_data, + service_name=service_name, + session=self, + ) return service_data def get_available_services(self): """ Return a list of names of available services. 
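Aside: a short usage sketch exercising the Session accessors reformatted above (all documented botocore entry points):

    import botocore.session

    session = botocore.session.get_session()
    print(session.get_config_variable('region'))      # via the config_store component
    print(session.user_agent())                       # the f-string built above
    print('s3' in session.get_available_services())   # from the data_loader component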
""" - return self.get_component('data_loader')\ - .list_available_services(type_name='service-2') + return self.get_component('data_loader').list_available_services( + type_name='service-2' + ) def set_debug_logger(self, logger_name='botocore'): """ @@ -577,8 +604,9 @@ class Session(object): """ self.set_stream_logger(logger_name, logging.DEBUG) - def set_stream_logger(self, logger_name, log_level, stream=None, - format_string=None): + def set_stream_logger( + self, logger_name, log_level, stream=None, format_string=None + ): """ Convenience method to configure a stream logger. @@ -645,8 +673,9 @@ class Session(object): # add ch to logger log.addHandler(ch) - def register(self, event_name, handler, unique_id=None, - unique_id_uses_count=False): + def register( + self, event_name, handler, unique_id=None, unique_id_uses_count=False + ): """Register a handler with an event. :type event_name: str @@ -679,11 +708,20 @@ class Session(object): ``unique_id_uses_count`` value declared by the very first ``register`` call for that ``unique_id``. """ - self._events.register(event_name, handler, unique_id, - unique_id_uses_count=unique_id_uses_count) + self._events.register( + event_name, + handler, + unique_id, + unique_id_uses_count=unique_id_uses_count, + ) - def unregister(self, event_name, handler=None, unique_id=None, - unique_id_uses_count=False): + def unregister( + self, + event_name, + handler=None, + unique_id=None, + unique_id_uses_count=False, + ): """Unregister a handler with an event. :type event_name: str @@ -712,9 +750,12 @@ class Session(object): ``unique_id_uses_count`` value declared by the very first ``register`` call for that ``unique_id``. """ - self._events.unregister(event_name, handler=handler, - unique_id=unique_id, - unique_id_uses_count=unique_id_uses_count) + self._events.unregister( + event_name, + handler=handler, + unique_id=unique_id, + unique_id_uses_count=unique_id_uses_count, + ) def emit(self, event_name, **kwargs): return self._events.emit(event_name, **kwargs) @@ -732,7 +773,8 @@ class Session(object): 'Fetching the %s component with the get_component() ' 'method is deprecated as the component has always been ' 'considered an internal interface of botocore' % name, - DeprecationWarning) + DeprecationWarning, + ) return self._internal_components.get_component(name) raise @@ -754,10 +796,19 @@ class Session(object): def lazy_register_component(self, name, component): self._components.lazy_register_component(name, component) - def create_client(self, service_name, region_name=None, api_version=None, - use_ssl=True, verify=None, endpoint_url=None, - aws_access_key_id=None, aws_secret_access_key=None, - aws_session_token=None, config=None): + def create_client( + self, + service_name, + region_name=None, + api_version=None, + use_ssl=True, + verify=None, + endpoint_url=None, + aws_access_key_id=None, + aws_secret_access_key=None, + aws_session_token=None, + config=None, + ): """Create a botocore client. 
:type service_name: string @@ -846,25 +897,29 @@ class Session(object): if api_version is None: api_version = self.get_config_variable('api_versions').get( - service_name, None) + service_name, None + ) loader = self.get_component('data_loader') event_emitter = self.get_component('event_emitter') - response_parser_factory = self.get_component( - 'response_parser_factory') + response_parser_factory = self.get_component('response_parser_factory') if config is not None and config.signature_version is UNSIGNED: credentials = None - elif aws_access_key_id is not None and aws_secret_access_key is not None: + elif ( + aws_access_key_id is not None and aws_secret_access_key is not None + ): credentials = botocore.credentials.Credentials( access_key=aws_access_key_id, secret_key=aws_secret_access_key, - token=aws_session_token) - elif self._missing_cred_vars(aws_access_key_id, - aws_secret_access_key): + token=aws_session_token, + ) + elif self._missing_cred_vars(aws_access_key_id, aws_secret_access_key): raise PartialCredentialsError( provider='explicit', - cred_var=self._missing_cred_vars(aws_access_key_id, - aws_secret_access_key)) + cred_var=self._missing_cred_vars( + aws_access_key_id, aws_secret_access_key + ), + ) else: credentials = self.get_credentials() endpoint_resolver = self._get_internal_component('endpoint_resolver') @@ -873,19 +928,34 @@ class Session(object): defaults_mode = self._resolve_defaults_mode(config, config_store) if defaults_mode != 'legacy': smart_defaults_factory = self._get_internal_component( - 'smart_defaults_factory') + 'smart_defaults_factory' + ) config_store = copy.deepcopy(config_store) smart_defaults_factory.merge_smart_defaults( - config_store, defaults_mode, region_name) + config_store, defaults_mode, region_name + ) client_creator = botocore.client.ClientCreator( - loader, endpoint_resolver, self.user_agent(), event_emitter, - retryhandler, translate, response_parser_factory, - exceptions_factory, config_store) + loader, + endpoint_resolver, + self.user_agent(), + event_emitter, + retryhandler, + translate, + response_parser_factory, + exceptions_factory, + config_store, + ) client = client_creator.create_client( - service_name=service_name, region_name=region_name, - is_secure=use_ssl, endpoint_url=endpoint_url, verify=verify, - credentials=credentials, scoped_config=self.get_scoped_config(), - client_config=config, api_version=api_version) + service_name=service_name, + region_name=region_name, + is_secure=use_ssl, + endpoint_url=endpoint_url, + verify=verify, + credentials=credentials, + scoped_config=self.get_scoped_config(), + client_config=config, + api_version=api_version, + ) monitor = self._get_internal_component('monitor') if monitor is not None: monitor.register(client.meta.events) @@ -920,13 +990,13 @@ class Session(object): mode = client_config.defaults_mode default_config_resolver = self._get_internal_component( - 'default_config_resolver') + 'default_config_resolver' + ) default_modes = default_config_resolver.get_default_modes() lmode = mode.lower() if lmode not in default_modes: raise InvalidDefaultsMode( - mode=mode, - valid_modes=', '.join(default_modes) + mode=mode, valid_modes=', '.join(default_modes) ) return lmode @@ -960,8 +1030,9 @@ class Session(object): resolver = self._get_internal_component('endpoint_resolver') return resolver.get_partition_for_region(region_name) - def get_available_regions(self, service_name, partition_name='aws', - allow_non_regional=False): + def get_available_regions( + self, service_name, 
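Aside: the `config.signature_version is UNSIGNED` branch in create_client() above is what lets anonymous clients skip the credential chain entirely (credentials resolve to None). A usage sketch:

    import botocore.session
    from botocore import UNSIGNED
    from botocore.config import Config

    session = botocore.session.get_session()
    # No credential provider is consulted for an UNSIGNED config.
    s3 = session.create_client(
        's3', region_name='us-east-1', config=Config(signature_version=UNSIGNED)
    )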
partition_name='aws', allow_non_regional=False + ): """Lists the region and endpoint names of a particular partition. :type service_name: string @@ -985,16 +1056,19 @@ class Session(object): try: service_data = self.get_service_data(service_name) endpoint_prefix = service_data['metadata'].get( - 'endpointPrefix', service_name) + 'endpointPrefix', service_name + ) results = resolver.get_available_endpoints( - endpoint_prefix, partition_name, allow_non_regional) + endpoint_prefix, partition_name, allow_non_regional + ) except UnknownServiceError: pass return results -class ComponentLocator(object): +class ComponentLocator: """Service locator for session components.""" + def __init__(self): self._components = {} self._deferred = {} @@ -1048,8 +1122,9 @@ class SessionVarDict(MutableMapping): def __len__(self): return len(self._store) - def _update_config_store_from_session_vars(self, logical_name, - config_options): + def _update_config_store_from_session_vars( + self, logical_name, config_options + ): # This is for backwards compatibility. The new preferred way to # modify configuration logic is to use the component system to get # the config_store component from the session, and then update @@ -1068,11 +1143,11 @@ class SessionVarDict(MutableMapping): config_property_names=config_name, default=default, conversion_func=typecast, - ) + ), ) -class SubsetChainConfigFactory(object): +class SubsetChainConfigFactory: """A class for creating backwards compatible configuration chains. This class can be used instead of @@ -1081,13 +1156,19 @@ class SubsetChainConfigFactory(object): out providers that are not in the methods tuple when creating a new config chain. """ + def __init__(self, session, methods, environ=None): self._factory = ConfigChainFactory(session, environ) self._supported_methods = methods - def create_config_chain(self, instance_name=None, env_var_names=None, - config_property_name=None, default=None, - conversion_func=None): + def create_config_chain( + self, + instance_name=None, + env_var_names=None, + config_property_name=None, + default=None, + conversion_func=None, + ): """Build a config chain following the standard botocore pattern. This config chain factory will omit any providers not in the methods diff --git a/contrib/python/botocore/py3/botocore/signers.py b/contrib/python/botocore/py3/botocore/signers.py index 4fb47433f0..9b258d5150 100644 --- a/contrib/python/botocore/py3/botocore/signers.py +++ b/contrib/python/botocore/py3/botocore/signers.py @@ -18,7 +18,7 @@ import weakref import botocore import botocore.auth from botocore.awsrequest import create_request_object, prepare_request_dict -from botocore.compat import OrderedDict, six +from botocore.compat import OrderedDict from botocore.exceptions import ( UnknownClientMethodError, UnknownSignatureVersionError, @@ -27,10 +27,10 @@ from botocore.exceptions import ( from botocore.utils import datetime2timestamp # Keep these imported. There's pre-existing code that uses them. -from botocore.utils import fix_s3_host # noqa +from botocore.utils import fix_s3_host # noqa -class RequestSigner(object): +class RequestSigner: """ An object to sign requests before they go out over the wire using one of the authentication mechanisms defined in ``auth.py``. This @@ -64,8 +64,16 @@ class RequestSigner(object): :type event_emitter: :py:class:`~botocore.hooks.BaseEventHooks` :param event_emitter: Extension mechanism to fire events. 
""" - def __init__(self, service_id, region_name, signing_name, - signature_version, credentials, event_emitter): + + def __init__( + self, + service_id, + region_name, + signing_name, + signature_version, + credentials, + event_emitter, + ): self._region_name = region_name self._signing_name = signing_name self._signature_version = signature_version @@ -94,8 +102,15 @@ class RequestSigner(object): # Don't call this method directly. return self.sign(operation_name, request) - def sign(self, operation_name, request, region_name=None, - signing_type='standard', expires_in=None, signing_name=None): + def sign( + self, + operation_name, + request, + region_name=None, + signing_type='standard', + expires_in=None, + signing_name=None, + ): """Sign a request before it goes out over the wire. :type operation_name: string @@ -130,23 +145,27 @@ class RequestSigner(object): signing_name = self._signing_name signature_version = self._choose_signer( - operation_name, signing_type, request.context) + operation_name, signing_type, request.context + ) # Allow mutating request before signing self._event_emitter.emit( - 'before-sign.{0}.{1}'.format( - self._service_id.hyphenize(), operation_name), - request=request, signing_name=signing_name, + 'before-sign.{}.{}'.format( + self._service_id.hyphenize(), operation_name + ), + request=request, + signing_name=signing_name, region_name=self._region_name, - signature_version=signature_version, request_signer=self, - operation_name=operation_name + signature_version=signature_version, + request_signer=self, + operation_name=operation_name, ) if signature_version != botocore.UNSIGNED: kwargs = { 'signing_name': signing_name, 'region_name': region_name, - 'signature_version': signature_version + 'signature_version': signature_version, } if expires_in is not None: kwargs['expires'] = expires_in @@ -160,7 +179,8 @@ class RequestSigner(object): except UnknownSignatureVersionError as e: if signing_type != 'standard': raise UnsupportedSignatureVersionError( - signature_version=signature_version) + signature_version=signature_version + ) else: raise e @@ -178,33 +198,42 @@ class RequestSigner(object): """ signing_type_suffix_map = { 'presign-post': '-presign-post', - 'presign-url': '-query' + 'presign-url': '-query', } suffix = signing_type_suffix_map.get(signing_type, '') signature_version = self._signature_version - if signature_version is not botocore.UNSIGNED and not \ - signature_version.endswith(suffix): + if ( + signature_version is not botocore.UNSIGNED + and not signature_version.endswith(suffix) + ): signature_version += suffix handler, response = self._event_emitter.emit_until_response( - 'choose-signer.{0}.{1}'.format( - self._service_id.hyphenize(), operation_name), - signing_name=self._signing_name, region_name=self._region_name, - signature_version=signature_version, context=context) + 'choose-signer.{}.{}'.format( + self._service_id.hyphenize(), operation_name + ), + signing_name=self._signing_name, + region_name=self._region_name, + signature_version=signature_version, + context=context, + ) if response is not None: signature_version = response # The suffix needs to be checked again in case we get an improper # signature version from choose-signer. 
- if signature_version is not botocore.UNSIGNED and not \ - signature_version.endswith(suffix): + if ( + signature_version is not botocore.UNSIGNED + and not signature_version.endswith(suffix) + ): signature_version += suffix return signature_version - def get_auth_instance(self, signing_name, region_name, - signature_version=None, **kwargs): + def get_auth_instance( + self, signing_name, region_name, signature_version=None, **kwargs + ): """ Get an auth instance which can be used to sign a request using the given signature version. @@ -229,7 +258,8 @@ class RequestSigner(object): cls = botocore.auth.AUTH_TYPE_MAPS.get(signature_version) if cls is None: raise UnknownSignatureVersionError( - signature_version=signature_version) + signature_version=signature_version + ) # If there's no credentials provided (i.e credentials is None), # then we'll pass a value of "None" over to the auth classes, # which already handle the cases where no credentials have @@ -249,9 +279,14 @@ class RequestSigner(object): # Alias get_auth for backwards compatibility. get_auth = get_auth_instance - def generate_presigned_url(self, request_dict, operation_name, - expires_in=3600, region_name=None, - signing_name=None): + def generate_presigned_url( + self, + request_dict, + operation_name, + expires_in=3600, + region_name=None, + signing_name=None, + ): """Generates a presigned url :type request_dict: dict @@ -274,14 +309,20 @@ class RequestSigner(object): :returns: The presigned url """ request = create_request_object(request_dict) - self.sign(operation_name, request, region_name, - 'presign-url', expires_in, signing_name) + self.sign( + operation_name, + request, + region_name, + 'presign-url', + expires_in, + signing_name, + ) request.prepare() return request.url -class CloudFrontSigner(object): +class CloudFrontSigner: '''A signer to create a signed CloudFront URL. First you create a cloudfront signer based on a normalized RSA signer:: @@ -343,25 +384,28 @@ class CloudFrontSigner(object): if date_less_than is not None: # We still need to build a canned policy for signing purpose policy = self.build_policy(url, date_less_than) - if isinstance(policy, six.text_type): + if isinstance(policy, str): policy = policy.encode('utf8') if date_less_than is not None: params = ['Expires=%s' % int(datetime2timestamp(date_less_than))] else: params = ['Policy=%s' % self._url_b64encode(policy).decode('utf8')] signature = self.rsa_signer(policy) - params.extend([ - 'Signature=%s' % self._url_b64encode(signature).decode('utf8'), - 'Key-Pair-Id=%s' % self.key_id, - ]) + params.extend( + [ + f"Signature={self._url_b64encode(signature).decode('utf8')}", + f"Key-Pair-Id={self.key_id}", + ] + ) return self._build_url(url, params) def _build_url(self, base_url, extra_params): separator = '&' if '?' in base_url else '?' return base_url + separator + '&'.join(extra_params) - def build_policy(self, resource, date_less_than, - date_greater_than=None, ip_address=None): + def build_policy( + self, resource, date_less_than, date_greater_than=None, ip_address=None + ): """A helper to build policy. :type resource: str @@ -404,8 +448,12 @@ class CloudFrontSigner(object): def _url_b64encode(self, data): # Required by CloudFront. 
See also: # http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-linux-openssl.html - return base64.b64encode( - data).replace(b'+', b'-').replace(b'=', b'_').replace(b'/', b'~') + return ( + base64.b64encode(data) + .replace(b'+', b'-') + .replace(b'=', b'_') + .replace(b'/', b'~') + ) def add_generate_db_auth_token(class_attributes, **kwargs): @@ -444,7 +492,7 @@ def generate_db_auth_token(self, DBHostname, Port, DBUsername, Region=None): 'query_string': '', 'headers': {}, 'body': params, - 'method': 'GET' + 'method': 'GET', } # RDS requires that the scheme not be set when sent over. This can cause @@ -454,22 +502,30 @@ def generate_db_auth_token(self, DBHostname, Port, DBUsername, Region=None): # netloc would be treated as a path component. To work around this we # introduce https here and remove it once we're done processing it. scheme = 'https://' - endpoint_url = '%s%s:%s' % (scheme, DBHostname, Port) + endpoint_url = f'{scheme}{DBHostname}:{Port}' prepare_request_dict(request_dict, endpoint_url) presigned_url = self._request_signer.generate_presigned_url( - operation_name='connect', request_dict=request_dict, - region_name=region, expires_in=900, signing_name='rds-db' + operation_name='connect', + request_dict=request_dict, + region_name=region, + expires_in=900, + signing_name='rds-db', ) - return presigned_url[len(scheme):] + return presigned_url[len(scheme) :] -class S3PostPresigner(object): +class S3PostPresigner: def __init__(self, request_signer): self._request_signer = request_signer - def generate_presigned_post(self, request_dict, fields=None, - conditions=None, expires_in=3600, - region_name=None): + def generate_presigned_post( + self, + request_dict, + fields=None, + conditions=None, + expires_in=3600, + region_name=None, + ): """Generates the url and the form fields used for a presigned s3 post :type request_dict: dict @@ -534,7 +590,8 @@ class S3PostPresigner(object): request.context['s3-presign-post-policy'] = policy self._request_signer.sign( - 'PutObject', request, region_name, 'presign-post') + 'PutObject', request, region_name, 'presign-post' + ) # Return the url and the fields for th form to post. return {'url': request.url, 'fields': fields} @@ -543,8 +600,9 @@ def add_generate_presigned_url(class_attributes, **kwargs): class_attributes['generate_presigned_url'] = generate_presigned_url -def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600, - HttpMethod=None): +def generate_presigned_url( + self, ClientMethod, Params=None, ExpiresIn=3600, HttpMethod=None +): """Generate a presigned url given a client, its method, and arguments :type ClientMethod: string @@ -583,14 +641,12 @@ def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600, except KeyError: raise UnknownClientMethodError(method_name=client_method) - operation_model = self.meta.service_model.operation_model( - operation_name) + operation_model = self.meta.service_model.operation_model(operation_name) params = self._emit_api_params(params, operation_model, context) # Create a request dict based on the params to serialize. - request_dict = serializer.serialize_to_request( - params, operation_model) + request_dict = serializer.serialize_to_request(params, operation_model) # Switch out the http method if user specified it. if http_method is not None: @@ -598,20 +654,24 @@ def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600, # Prepare the request dict by including the client's endpoint url. 
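
This module-level generate_presigned_url is attached to clients via add_generate_presigned_url, so the code in this hunk is what runs behind the familiar client call. A minimal sketch, assuming a boto3 S3 client; the bucket and key names are placeholders::

    import boto3

    s3 = boto3.client('s3')
    url = s3.generate_presigned_url(
        ClientMethod='get_object',
        Params={'Bucket': 'example-bucket', 'Key': 'example-key'},
        ExpiresIn=300,  # seconds; defaults to 3600 as in the signature above
    )
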
prepare_request_dict( - request_dict, endpoint_url=self.meta.endpoint_url, context=context) + request_dict, endpoint_url=self.meta.endpoint_url, context=context + ) # Generate the presigned url. return request_signer.generate_presigned_url( - request_dict=request_dict, expires_in=expires_in, - operation_name=operation_name) + request_dict=request_dict, + expires_in=expires_in, + operation_name=operation_name, + ) def add_generate_presigned_post(class_attributes, **kwargs): class_attributes['generate_presigned_post'] = generate_presigned_post -def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None, - ExpiresIn=3600): +def generate_presigned_post( + self, Bucket, Key, Fields=None, Conditions=None, ExpiresIn=3600 +): """Builds the url and the form fields used for a presigned s3 post :type Bucket: string @@ -693,16 +753,17 @@ def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None, # We choose the CreateBucket operation model because its url gets # serialized to what a presign post requires. - operation_model = self.meta.service_model.operation_model( - 'CreateBucket') + operation_model = self.meta.service_model.operation_model('CreateBucket') # Create a request dict based on the params to serialize. request_dict = serializer.serialize_to_request( - {'Bucket': bucket}, operation_model) + {'Bucket': bucket}, operation_model + ) # Prepare the request dict by including the client's endpoint url. prepare_request_dict( - request_dict, endpoint_url=self.meta.endpoint_url, + request_dict, + endpoint_url=self.meta.endpoint_url, context={ 'is_presign_request': True, 'use_global_endpoint': _should_use_global_endpoint(self), @@ -715,7 +776,7 @@ def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None, # If the key ends with filename, the only constraint that can be # imposed is if it starts with the specified prefix. if key.endswith('${filename}'): - conditions.append(["starts-with", '$key', key[:-len('${filename}')]]) + conditions.append(["starts-with", '$key', key[: -len('${filename}')]]) else: conditions.append({'key': key}) @@ -723,8 +784,11 @@ def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None, fields['key'] = key return post_presigner.generate_presigned_post( - request_dict=request_dict, fields=fields, conditions=conditions, - expires_in=expires_in) + request_dict=request_dict, + fields=fields, + conditions=conditions, + expires_in=expires_in, + ) def _should_use_global_endpoint(client): @@ -734,7 +798,9 @@ def _should_use_global_endpoint(client): if s3_config: if s3_config.get('use_dualstack_endpoint', False): return False - if s3_config.get('us_east_1_regional_endpoint') == 'regional' and \ - client.meta.config.region_name == 'us-east-1': + if ( + s3_config.get('us_east_1_regional_endpoint') == 'regional' + and client.meta.config.region_name == 'us-east-1' + ): return False return True diff --git a/contrib/python/botocore/py3/botocore/stub.py b/contrib/python/botocore/py3/botocore/stub.py index 60bdcde041..137cfe4288 100644 --- a/contrib/python/botocore/py3/botocore/stub.py +++ b/contrib/python/botocore/py3/botocore/stub.py @@ -24,7 +24,7 @@ from botocore.exceptions import ( from botocore.validate import validate_parameters -class _ANY(object): +class _ANY: """ A helper object that compares equal to everything. 
Copied from unittest.mock @@ -43,7 +43,7 @@ class _ANY(object): ANY = _ANY() -class Stubber(object): +class Stubber: """ This class will allow you to stub out requests so you don't have to hit an endpoint to write tests. Responses are returned first in, first out. @@ -164,6 +164,7 @@ class Stubber(object): assert service_response == response """ + def __init__(self, client): """ :param client: The client to add your stubs to. @@ -187,11 +188,13 @@ class Stubber(object): self.client.meta.events.register_first( 'before-parameter-build.*.*', self._assert_expected_params, - unique_id=self._expected_params_event_id) + unique_id=self._expected_params_event_id, + ) self.client.meta.events.register( 'before-call.*.*', self._get_response_handler, - unique_id=self._event_id) + unique_id=self._event_id, + ) def deactivate(self): """ @@ -200,11 +203,13 @@ class Stubber(object): self.client.meta.events.unregister( 'before-parameter-build.*.*', self._assert_expected_params, - unique_id=self._expected_params_event_id) + unique_id=self._expected_params_event_id, + ) self.client.meta.events.unregister( 'before-call.*.*', self._get_response_handler, - unique_id=self._event_id) + unique_id=self._event_id, + ) def add_response(self, method, service_response, expected_params=None): """ @@ -235,7 +240,8 @@ class Stubber(object): if not hasattr(self.client, method): raise ValueError( "Client %s does not have method: %s" - % (self.client.meta.service_model.service_name, method)) + % (self.client.meta.service_model.service_name, method) + ) # Create a successful http response http_response = AWSResponse(None, 200, {}, None) @@ -247,14 +253,21 @@ class Stubber(object): response = { 'operation_name': operation_name, 'response': (http_response, service_response), - 'expected_params': expected_params + 'expected_params': expected_params, } self._queue.append(response) - def add_client_error(self, method, service_error_code='', - service_message='', http_status_code=400, - service_error_meta=None, expected_params=None, - response_meta=None, modeled_fields=None): + def add_client_error( + self, + method, + service_error_code='', + service_message='', + http_status_code=400, + service_error_meta=None, + expected_params=None, + response_meta=None, + modeled_fields=None, + ): """ Adds a ``ClientError`` to the response queue. @@ -301,10 +314,7 @@ class Stubber(object): # look like. parsed_response = { 'ResponseMetadata': {'HTTPStatusCode': http_status_code}, - 'Error': { - 'Message': service_message, - 'Code': service_error_code - } + 'Error': {'Message': service_message, 'Code': service_error_code}, } if service_error_meta is not None: @@ -335,8 +345,7 @@ class Stubber(object): """ remaining = len(self._queue) if remaining != 0: - raise AssertionError( - "%d responses remaining in queue." % remaining) + raise AssertionError(f"{remaining} responses remaining in queue.") def _assert_expected_call_order(self, model, params): if not self._queue: @@ -346,14 +355,15 @@ class Stubber(object): 'Unexpected API Call: A call was made but no additional ' 'calls expected. Either the API Call was not stubbed or ' 'it was called multiple times.' - ) + ), ) name = self._queue[0]['operation_name'] if name != model.name: raise StubResponseError( operation_name=model.name, - reason='Operation mismatch: found response for %s.' 
% name) + reason=f'Operation mismatch: found response for {name}.', + ) def _get_response_handler(self, model, params, context, **kwargs): self._assert_expected_call_order(model, params) @@ -373,15 +383,17 @@ class Stubber(object): if param not in params or expected_params[param] != params[param]: raise StubAssertionError( operation_name=model.name, - reason='Expected parameters:\n%s,\nbut received:\n%s' % ( - pformat(expected_params), pformat(params))) + reason='Expected parameters:\n%s,\nbut received:\n%s' + % (pformat(expected_params), pformat(params)), + ) # Ensure there are no extra params hanging around if sorted(expected_params.keys()) != sorted(params.keys()): raise StubAssertionError( operation_name=model.name, - reason='Expected parameters:\n%s,\nbut received:\n%s' % ( - pformat(expected_params), pformat(params))) + reason='Expected parameters:\n%s,\nbut received:\n%s' + % (pformat(expected_params), pformat(params)), + ) def _should_not_stub(self, context): # Do not include presign requests when processing stubbed client calls @@ -412,4 +424,6 @@ class Stubber(object): # empty apart from ResponseMetadata raise ParamValidationError( report=( - "Service response should only contain ResponseMetadata.")) + "Service response should only contain ResponseMetadata." + ) + ) diff --git a/contrib/python/botocore/py3/botocore/translate.py b/contrib/python/botocore/py3/botocore/translate.py index 5a661ab063..ecfe3bcaf4 100644 --- a/contrib/python/botocore/py3/botocore/translate.py +++ b/contrib/python/botocore/py3/botocore/translate.py @@ -16,8 +16,9 @@ import copy from botocore.utils import merge_dicts -def build_retry_config(endpoint_prefix, retry_model, definitions, - client_retry_config=None): +def build_retry_config( + endpoint_prefix, retry_model, definitions, client_retry_config=None +): service_config = retry_model.get(endpoint_prefix, {}) resolve_references(service_config, definitions) # We want to merge the global defaults with the service specific @@ -52,8 +53,9 @@ def _merge_client_retry_config(retry_config, client_retry_config): # configuration in the retry model via the client, we will need to # revisit this logic to make sure max_attempts gets applied # per operation. - retry_config['__default__'][ - 'max_attempts'] = max_retry_attempts_override + 1 + retry_config['__default__']['max_attempts'] = ( + max_retry_attempts_override + 1 + ) def resolve_references(config, definitions): diff --git a/contrib/python/botocore/py3/botocore/utils.py b/contrib/python/botocore/py3/botocore/utils.py index 94b180bf39..5f5493eeb4 100644 --- a/contrib/python/botocore/py3/botocore/utils.py +++ b/contrib/python/botocore/py3/botocore/utils.py @@ -51,7 +51,6 @@ from botocore.compat import ( get_tzinfo_options, json, quote, - six, urlparse, urlsplit, urlunsplit, @@ -95,7 +94,9 @@ METADATA_ENDPOINT_MODES = ('ipv4', 'ipv6') SAFE_CHARS = '-._~' LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]') RETRYABLE_HTTP_ERRORS = ( - ReadTimeoutError, EndpointConnectionError, ConnectionClosedError, + ReadTimeoutError, + EndpointConnectionError, + ConnectionClosedError, ConnectTimeoutError, ) S3_ACCELERATE_WHITELIST = ['dualstack'] @@ -175,7 +176,7 @@ EVENT_ALIASES = { "stepfunctions": "sfn", "storagegateway": "storage-gateway", "streams.dynamodb": "dynamodb-streams", - "tagging": "resource-groups-tagging-api" + "tagging": "resource-groups-tagging-api", } @@ -205,13 +206,14 @@ def resolve_imds_endpoint_mode(session): ec2_metadata_service_endpoint_mode takes precedence over imds_use_ipv6. 
""" endpoint_mode = session.get_config_variable( - 'ec2_metadata_service_endpoint_mode') + 'ec2_metadata_service_endpoint_mode' + ) if endpoint_mode is not None: lendpoint_mode = endpoint_mode.lower() if lendpoint_mode not in METADATA_ENDPOINT_MODES: error_msg_kwargs = { 'mode': endpoint_mode, - 'valid_modes': METADATA_ENDPOINT_MODES + 'valid_modes': METADATA_ENDPOINT_MODES, } raise InvalidIMDSEndpointModeError(**error_msg_kwargs) return lendpoint_mode @@ -230,10 +232,10 @@ def is_json_value_header(shape): :rtype: Bool """ return ( - hasattr(shape, 'serialization') and - shape.serialization.get('jsonvalue', False) and - shape.serialization.get('location') == 'header' and - shape.type_name == 'string' + hasattr(shape, 'serialization') + and shape.serialization.get('jsonvalue', False) + and shape.serialization.get('location') == 'header' + and shape.type_name == 'string' ) @@ -244,9 +246,7 @@ def has_header(header_name, headers): elif isinstance(headers, botocore.awsrequest.HeadersDict): return header_name in headers else: - return header_name.lower() in [ - key.lower() for key in headers.keys() - ] + return header_name.lower() in [key.lower() for key in headers.keys()] def get_service_module_name(service_model): @@ -257,7 +257,9 @@ def get_service_module_name(service_model): name = service_model.metadata.get( 'serviceAbbreviation', service_model.metadata.get( - 'serviceFullName', service_model.service_name)) + 'serviceFullName', service_model.service_name + ), + ) name = name.replace('Amazon', '') name = name.replace('AWS', '') name = re.sub(r'\W+', '', name) @@ -340,10 +342,7 @@ def set_value_from_jmespath(source, expression, value, is_first=True): source[current_key] = {} return set_value_from_jmespath( - source[current_key], - remainder, - value, - is_first=False + source[current_key], remainder, value, is_first=False ) # If we're down to a single key, set it. 
@@ -359,6 +358,7 @@ def is_global_accesspoint(context): class _RetriesExceededError(Exception): """Internal exception used when the number of retries are exceeded.""" + pass @@ -367,15 +367,21 @@ class BadIMDSRequestError(Exception): self.request = request -class IMDSFetcher(object): +class IMDSFetcher: _RETRIES_EXCEEDED_ERROR_CLS = _RetriesExceededError _TOKEN_PATH = 'latest/api/token' _TOKEN_TTL = '21600' - def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT, - num_attempts=1, base_url=METADATA_BASE_URL, - env=None, user_agent=None, config=None): + def __init__( + self, + timeout=DEFAULT_METADATA_SERVICE_TIMEOUT, + num_attempts=1, + base_url=METADATA_BASE_URL, + env=None, + user_agent=None, + config=None, + ): self._timeout = timeout self._num_attempts = num_attempts if config is None: @@ -397,8 +403,12 @@ class IMDSFetcher(object): return self._base_url def _select_base_url(self, base_url, config): - requires_ipv6 = config.get( - 'ec2_metadata_service_endpoint_mode') == 'ipv6' + if config is None: + config = {} + + requires_ipv6 = ( + config.get('ec2_metadata_service_endpoint_mode') == 'ipv6' + ) custom_metadata_endpoint = config.get('ec2_metadata_service_endpoint') if requires_ipv6 and custom_metadata_endpoint: @@ -437,7 +447,8 @@ class IMDSFetcher(object): } self._add_user_agent(headers) request = botocore.awsrequest.AWSRequest( - method='PUT', url=url, headers=headers) + method='PUT', url=url, headers=headers + ) for i in range(self._num_attempts): try: response = self._session.send(request.prepare()) @@ -452,7 +463,11 @@ class IMDSFetcher(object): except RETRYABLE_HTTP_ERRORS as e: logger.debug( "Caught retryable HTTP exception while making metadata " - "service request to %s: %s", url, e, exc_info=True) + "service request to %s: %s", + url, + e, + exc_info=True, + ) except HTTPClientError as e: if isinstance(e.kwargs.get('error'), LocationParseError): raise InvalidIMDSEndpointError(endpoint=url, error=e) @@ -487,14 +502,19 @@ class IMDSFetcher(object): for i in range(self._num_attempts): try: request = botocore.awsrequest.AWSRequest( - method='GET', url=url, headers=headers) + method='GET', url=url, headers=headers + ) response = self._session.send(request.prepare()) if not retry_func(response): return response except RETRYABLE_HTTP_ERRORS as e: logger.debug( "Caught retryable HTTP exception while making metadata " - "service request to %s: %s", url, e, exc_info=True) + "service request to %s: %s", + url, + e, + exc_info=True, + ) raise self._RETRIES_EXCEEDED_ERROR_CLS() def _add_user_agent(self, headers): @@ -507,10 +527,7 @@ class IMDSFetcher(object): raise self._RETRIES_EXCEEDED_ERROR_CLS() def _default_retry(self, response): - return ( - self._is_non_ok_response(response) or - self._is_empty(response) - ) + return self._is_non_ok_response(response) or self._is_empty(response) def _is_non_ok_response(self, response): if response.status_code != 200: @@ -529,9 +546,7 @@ class IMDSFetcher(object): "Metadata service returned %s response " "with status code of %s for url: %s" ) - logger_args = [ - reason_to_log, response.status_code, response.url - ] + logger_args = [reason_to_log, response.status_code, response.url] if log_body: statement += ", content body: %s" logger_args.append(response.content) @@ -541,7 +556,10 @@ class IMDSFetcher(object): class InstanceMetadataFetcher(IMDSFetcher): _URL_PATH = 'latest/meta-data/iam/security-credentials/' _REQUIRED_CREDENTIAL_FIELDS = [ - 'AccessKeyId', 'SecretAccessKey', 'Token', 'Expiration' + 'AccessKeyId', + 'SecretAccessKey', + 
'Token', + 'Expiration', ] def retrieve_iam_role_credentials(self): @@ -569,13 +587,18 @@ class InstanceMetadataFetcher(IMDSFetcher): # retrieve credentials. These error will contain both a # Code and Message key. if 'Code' in credentials and 'Message' in credentials: - logger.debug('Error response received when retrieving' - 'credentials: %s.', credentials) + logger.debug( + 'Error response received when retrieving' + 'credentials: %s.', + credentials, + ) return {} except self._RETRIES_EXCEEDED_ERROR_CLS: - logger.debug("Max number of attempts exceeded (%s) when " - "attempting to retrieve data from metadata service.", - self._num_attempts) + logger.debug( + "Max number of attempts exceeded (%s) when " + "attempting to retrieve data from metadata service.", + self._num_attempts, + ) except BadIMDSRequestError as e: logger.debug("Bad IMDS request: %s", e.request) return {} @@ -604,16 +627,13 @@ class InstanceMetadataFetcher(IMDSFetcher): return True def _needs_retry_for_role_name(self, response): - return ( - self._is_non_ok_response(response) or - self._is_empty(response) - ) + return self._is_non_ok_response(response) or self._is_empty(response) def _needs_retry_for_credentials(self, response): return ( - self._is_non_ok_response(response) or - self._is_empty(response) or - self._is_invalid_json(response) + self._is_non_ok_response(response) + or self._is_empty(response) + or self._is_invalid_json(response) ) def _contains_all_credential_fields(self, credentials): @@ -621,7 +641,8 @@ class InstanceMetadataFetcher(IMDSFetcher): if field not in credentials: logger.debug( 'Retrieved credentials is missing required field: %s', - field) + field, + ) return False return True @@ -638,11 +659,15 @@ class InstanceMetadataFetcher(IMDSFetcher): ) refresh_interval_with_jitter = refresh_interval + random.randint(120, 600) current_time = datetime.datetime.utcnow() - refresh_offset = datetime.timedelta(seconds=refresh_interval_with_jitter) + refresh_offset = datetime.timedelta( + seconds=refresh_interval_with_jitter + ) extension_time = expiration - refresh_offset if current_time >= extension_time: new_time = current_time + refresh_offset - credentials["expiry_time"] = new_time.strftime("%Y-%m-%dT%H:%M:%SZ") + credentials["expiry_time"] = new_time.strftime( + "%Y-%m-%dT%H:%M:%SZ" + ) logger.info( f"Attempting credential expiration extension due to a " f"credential service availability issue. A refresh of " @@ -655,7 +680,7 @@ class InstanceMetadataFetcher(IMDSFetcher): ) -class IMDSRegionProvider(object): +class IMDSRegionProvider: def __init__(self, session, environ=None, fetcher=None): """Initialize IMDSRegionProvider. 
:type session: :class:`botocore.session.Session` @@ -695,15 +720,18 @@ class IMDSRegionProvider(object): def _create_fetcher(self): metadata_timeout = self._session.get_config_variable( - 'metadata_service_timeout') + 'metadata_service_timeout' + ) metadata_num_attempts = self._session.get_config_variable( - 'metadata_service_num_attempts') + 'metadata_service_num_attempts' + ) imds_config = { 'ec2_metadata_service_endpoint': self._session.get_config_variable( - 'ec2_metadata_service_endpoint'), + 'ec2_metadata_service_endpoint' + ), 'ec2_metadata_service_endpoint_mode': resolve_imds_endpoint_mode( self._session - ) + ), } fetcher = InstanceMetadataRegionFetcher( timeout=metadata_timeout, @@ -734,9 +762,11 @@ class InstanceMetadataRegionFetcher(IMDSFetcher): region = self._get_region() return region except self._RETRIES_EXCEEDED_ERROR_CLS: - logger.debug("Max number of attempts exceeded (%s) when " - "attempting to retrieve data from metadata service.", - self._num_attempts) + logger.debug( + "Max number of attempts exceeded (%s) when " + "attempting to retrieve data from metadata service.", + self._num_attempts, + ) return None def _get_region(self): @@ -744,7 +774,7 @@ class InstanceMetadataRegionFetcher(IMDSFetcher): response = self._get_request( url_path=self._URL_PATH, retry_func=self._default_retry, - token=token + token=token, ) availability_zone = response.text region = availability_zone[:-1] @@ -781,7 +811,7 @@ def merge_dicts(dict1, dict2, append_lists=False): def lowercase_dict(original): - """Copies the given dictionary ensuring all keys are lowercase strings. """ + """Copies the given dictionary ensuring all keys are lowercase strings.""" copy = {} for key in original: copy[key.lower()] = original[key] @@ -840,11 +870,13 @@ def percent_encode_sequence(mapping, safe=SAFE_CHARS): for key, value in pairs: if isinstance(value, list): for element in value: - encoded_pairs.append('%s=%s' % (percent_encode(key), - percent_encode(element))) + encoded_pairs.append( + f'{percent_encode(key)}={percent_encode(element)}' + ) else: - encoded_pairs.append('%s=%s' % (percent_encode(key), - percent_encode(value))) + encoded_pairs.append( + f'{percent_encode(key)}={percent_encode(value)}' + ) return '&'.join(encoded_pairs) @@ -861,10 +893,10 @@ def percent_encode(input_str, safe=SAFE_CHARS): first. """ # If its not a binary or text string, make it a text string. - if not isinstance(input_str, (six.binary_type, six.text_type)): - input_str = six.text_type(input_str) + if not isinstance(input_str, (bytes, str)): + input_str = str(input_str) # If it's not bytes, make it bytes by UTF-8 encoding it. - if not isinstance(input_str, six.binary_type): + if not isinstance(input_str, bytes): input_str = input_str.encode('utf-8') return quote(input_str, safe=safe) @@ -885,7 +917,7 @@ def _parse_timestamp_with_tzinfo(value, tzinfo): # enforce that GMT == UTC. 
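
For reference, both helpers touched in this hunk are importable from botocore.utils; a minimal sketch of their behavior (the timestamp values are illustrative)::

    from botocore.utils import parse_timestamp, percent_encode

    parse_timestamp('2022-05-25T09:07:37Z')  # tz-aware datetime in UTC
    parse_timestamp(1653462457)              # epoch seconds are accepted too
    percent_encode('a b/c')                  # 'a%20b%2Fc'; beyond alphanumerics,
                                             # only '-._~' stay unescaped
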
return dateutil.parser.parse(value, tzinfos={'GMT': tzutc()}) except (TypeError, ValueError) as e: - raise ValueError('Invalid timestamp "%s": %s' % (value, e)) + raise ValueError(f'Invalid timestamp "{value}": {e}') def parse_timestamp(value): @@ -904,10 +936,14 @@ def parse_timestamp(value): try: return _parse_timestamp_with_tzinfo(value, tzinfo) except OSError as e: - logger.debug('Unable to parse timestamp with "%s" timezone info.', - tzinfo.__name__, exc_info=e) - raise RuntimeError('Unable to calculate correct timezone offset for ' - '"%s"' % value) + logger.debug( + 'Unable to parse timestamp with "%s" timezone info.', + tzinfo.__name__, + exc_info=e, + ) + raise RuntimeError( + 'Unable to calculate correct timezone offset for "%s"' % value + ) def parse_to_aware_datetime(value): @@ -973,7 +1009,9 @@ def datetime2timestamp(dt, default_timezone=None): d = dt.replace(tzinfo=None) - dt.utcoffset() - epoch if hasattr(d, "total_seconds"): return d.total_seconds() # Works in Python 3.6+ - return (d.microseconds + (d.seconds + d.days * 24 * 3600) * 10**6) / 10**6 + return ( + d.microseconds + (d.seconds + d.days * 24 * 3600) * 10**6 + ) / 10**6 def calculate_sha256(body, as_hex=False): @@ -1053,7 +1091,7 @@ def _in_pairs(iterable): return zip_longest(shared_iter, shared_iter) -class CachedProperty(object): +class CachedProperty: """A read only property that caches the initially computed value. This descriptor will only call the provided ``fget`` function once. @@ -1073,7 +1111,7 @@ class CachedProperty(object): return computed_value -class ArgumentGenerator(object): +class ArgumentGenerator: """Generate sample input based on a shape model. This class contains a ``generate_skeleton`` method that will take @@ -1099,6 +1137,7 @@ class ArgumentGenerator(object): print("Sample input for dynamodb.CreateTable: %s" % sample_input) """ + def __init__(self, use_member_names=False): self._use_member_names = use_member_names @@ -1147,7 +1186,8 @@ class ArgumentGenerator(object): skeleton = OrderedDict() for member_name, member_shape in shape.members.items(): skeleton[member_name] = self._generate_skeleton( - member_shape, stack, name=member_name) + member_shape, stack, name=member_name + ) return skeleton def _generate_type_list(self, shape, stack): @@ -1164,15 +1204,17 @@ class ArgumentGenerator(object): key_shape = shape.key value_shape = shape.value assert key_shape.type_name == 'string' - return OrderedDict([ - ('KeyName', self._generate_skeleton(value_shape, stack)), - ]) + return OrderedDict( + [ + ('KeyName', self._generate_skeleton(value_shape, stack)), + ] + ) def is_valid_ipv6_endpoint_url(endpoint_url): if UNSAFE_URL_CHARS.intersection(endpoint_url): return False - hostname = '[{}]'.format(urlparse(endpoint_url).hostname) + hostname = f'[{urlparse(endpoint_url).hostname}]' return IPV6_ADDRZ_RE.match(hostname) is not None @@ -1200,12 +1242,15 @@ def is_valid_endpoint_url(endpoint_url): hostname = hostname[:-1] allowed = re.compile( r"^((?!-)[A-Z\d-]{1,63}(?<!-)\.)*((?!-)[A-Z\d-]{1,63}(?<!-))$", - re.IGNORECASE) + re.IGNORECASE, + ) return allowed.match(hostname) def is_valid_uri(endpoint_url): - return is_valid_endpoint_url(endpoint_url) or is_valid_ipv6_endpoint_url(endpoint_url) + return is_valid_endpoint_url(endpoint_url) or is_valid_ipv6_endpoint_url( + endpoint_url + ) def validate_region_name(region_name): @@ -1241,8 +1286,13 @@ def check_dns_name(bucket_name): return True -def fix_s3_host(request, signature_version, region_name, - default_endpoint_url=None, **kwargs): +def 
fix_s3_host( + request, + signature_version, + region_name, + default_endpoint_url=None, + **kwargs, +): """ This handler looks at S3 requests just before they are signed. If there is a bucket name on the path (true for everything except @@ -1256,15 +1306,18 @@ def fix_s3_host(request, signature_version, region_name, default_endpoint_url = 's3.amazonaws.com' try: switch_to_virtual_host_style( - request, signature_version, default_endpoint_url) + request, signature_version, default_endpoint_url + ) except InvalidDNSNameError as e: bucket_name = e.kwargs['bucket_name'] - logger.debug('Not changing URI, bucket is not DNS compatible: %s', - bucket_name) + logger.debug( + 'Not changing URI, bucket is not DNS compatible: %s', bucket_name + ) -def switch_to_virtual_host_style(request, signature_version, - default_endpoint_url=None, **kwargs): +def switch_to_virtual_host_style( + request, signature_version, default_endpoint_url=None, **kwargs +): """ This is a handler to force virtual host style s3 addressing no matter the signature version (which is taken in consideration for the default @@ -1285,8 +1338,10 @@ def switch_to_virtual_host_style(request, signature_version, # For the GetBucketLocation response, we should not be using # the virtual host style addressing so we can avoid any sigv4 # issues. - logger.debug("Request is GetBucketLocation operation, not checking " - "for DNS compatibility.") + logger.debug( + "Request is GetBucketLocation operation, not checking " + "for DNS compatibility." + ) return parts = urlsplit(request.url) request.auth_path = parts.path @@ -1302,8 +1357,7 @@ def switch_to_virtual_host_style(request, signature_version, # If the bucket name is empty we should not be checking for # dns compatibility. return - logger.debug('Checking for DNS compatible bucket for: %s', - request.url) + logger.debug('Checking for DNS compatible bucket for: %s', request.url) if check_dns_name(bucket_name): # If the operation is on a bucket, the auth_path must be # terminated with a '/' character. @@ -1317,8 +1371,7 @@ def switch_to_virtual_host_style(request, signature_version, path = '/'.join(path_parts) or '/' global_endpoint = default_endpoint_url host = bucket_name + '.' 
+ global_endpoint - new_tuple = (parts.scheme, host, path, - parts.query, '') + new_tuple = (parts.scheme, host, path, parts.query, '') new_uri = urlunsplit(new_tuple) request.url = new_uri logger.debug('URI updated to: %s', new_uri) @@ -1359,6 +1412,7 @@ def instance_cache(func): result = func(self, *args, **kwargs) self._instance_cache[cache_key] = result return result + return _cache_guard @@ -1391,7 +1445,8 @@ def switch_host_with_param(request, param_name): def _switch_hosts(request, new_endpoint, use_new_scheme=True): final_endpoint = _get_new_endpoint( - request.url, new_endpoint, use_new_scheme) + request.url, new_endpoint, use_new_scheme + ) request.url = final_endpoint @@ -1406,11 +1461,10 @@ def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True): new_endpoint_components.netloc, original_endpoint_components.path, original_endpoint_components.query, - '' + '', ) final_endpoint = urlunsplit(final_endpoint_components) - logger.debug('Updating URI from %s to %s' % ( - original_endpoint, final_endpoint)) + logger.debug(f'Updating URI from {original_endpoint} to {final_endpoint}') return final_endpoint @@ -1423,8 +1477,11 @@ def deep_merge(base, extra): """ for key in extra: # If the key represents a dict on both given dicts, merge the sub-dicts - if key in base and isinstance(base[key], dict)\ - and isinstance(extra[key], dict): + if ( + key in base + and isinstance(base[key], dict) + and isinstance(extra[key], dict) + ): deep_merge(base[key], extra[key]) continue @@ -1440,7 +1497,7 @@ def hyphenize_service_id(service_id): return service_id.replace(' ', '-').lower() -class S3RegionRedirector(object): +class S3RegionRedirector: def __init__(self, endpoint_bridge, client, cache=None): self._endpoint_resolver = endpoint_bridge self._cache = cache @@ -1455,8 +1512,7 @@ class S3RegionRedirector(object): emitter = event_emitter or self._client.meta.events emitter.register('needs-retry.s3', self.redirect_from_error) emitter.register('before-call.s3', self.set_request_url) - emitter.register('before-parameter-build.s3', - self.redirect_from_cache) + emitter.register('before-parameter-build.s3', self.redirect_from_cache) def redirect_from_error(self, request_dict, response, operation, **kwargs): """ @@ -1478,7 +1534,8 @@ class S3RegionRedirector(object): if request_dict.get('context', {}).get('s3_redirected'): logger.debug( - 'S3 request was previously redirected, not redirecting.') + 'S3 request was previously redirected, not redirecting.' + ) return error = response[1].get('Error', {}) @@ -1490,24 +1547,30 @@ class S3RegionRedirector(object): # we'll get a 400 Bad Request but we won't get a # body saying it's an "AuthorizationHeaderMalformed". 
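
deep_merge above mutates base in place, recursing only where both sides hold dicts and otherwise letting extra win. A minimal sketch::

    from botocore.utils import deep_merge

    base = {'retries': {'max_attempts': 3}}
    deep_merge(base, {'retries': {'mode': 'standard'}, 'region': 'us-east-1'})
    # base == {'retries': {'max_attempts': 3, 'mode': 'standard'},
    #          'region': 'us-east-1'}
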
is_special_head_object = ( - error_code in ['301', '400'] and - operation.name == 'HeadObject' + error_code in ('301', '400') and operation.name == 'HeadObject' ) is_special_head_bucket = ( - error_code in ['301', '400'] and - operation.name == 'HeadBucket' and - 'x-amz-bucket-region' in response_metadata.get('HTTPHeaders', {}) + error_code in ('301', '400') + and operation.name == 'HeadBucket' + and 'x-amz-bucket-region' + in response_metadata.get('HTTPHeaders', {}) ) is_wrong_signing_region = ( - error_code == 'AuthorizationHeaderMalformed' and - 'Region' in error + error_code == 'AuthorizationHeaderMalformed' and 'Region' in error ) - is_redirect_status = response[0] is not None and \ - response[0].status_code in [301, 302, 307] + is_redirect_status = response[0] is not None and response[ + 0 + ].status_code in (301, 302, 307) is_permanent_redirect = error_code == 'PermanentRedirect' - if not any([is_special_head_object, is_wrong_signing_region, - is_permanent_redirect, is_special_head_bucket, - is_redirect_status]): + if not any( + [ + is_special_head_object, + is_wrong_signing_region, + is_permanent_redirect, + is_special_head_bucket, + is_redirect_status, + ] + ): return bucket = request_dict['context']['signing']['bucket'] @@ -1518,21 +1581,23 @@ class S3RegionRedirector(object): logger.debug( "S3 client configured for region %s but the bucket %s is not " "in that region and the proper region could not be " - "automatically determined." % (client_region, bucket)) + "automatically determined." % (client_region, bucket) + ) return logger.debug( "S3 client configured for region %s but the bucket %s is in region" " %s; Please configure the proper region to avoid multiple " - "unnecessary redirects and signing attempts." % ( - client_region, bucket, new_region)) + "unnecessary redirects and signing attempts." 
+ % (client_region, bucket, new_region) + ) endpoint = self._endpoint_resolver.resolve('s3', new_region) endpoint = endpoint['endpoint_url'] signing_context = { 'region': new_region, 'bucket': bucket, - 'endpoint': endpoint + 'endpoint': endpoint, } request_dict['context']['signing'] = signing_context @@ -1604,7 +1669,7 @@ class InvalidArnException(ValueError): pass -class ArnParser(object): +class ArnParser: def parse_arn(self, arn): arn_parts = arn.split(':', 5) if len(arn_parts) < 6: @@ -1621,7 +1686,7 @@ class ArnParser(object): } -class S3ArnParamHandler(object): +class S3ArnParamHandler: _RESOURCE_REGEX = re.compile( r'^(?P<resource_type>accesspoint|outpost)[/:](?P<resource_name>.+)$' ) @@ -1629,9 +1694,7 @@ class S3ArnParamHandler(object): r'^(?P<outpost_name>[a-zA-Z0-9\-]{1,63})[/:]accesspoint[/:]' r'(?P<accesspoint_name>[a-zA-Z0-9\-]{1,63}$)' ) - _BLACKLISTED_OPERATIONS = [ - 'CreateBucket' - ] + _BLACKLISTED_OPERATIONS = ['CreateBucket'] def __init__(self, arn_parser=None): self._arn_parser = arn_parser @@ -1708,13 +1771,19 @@ class S3ArnParamHandler(object): } -class S3EndpointSetter(object): +class S3EndpointSetter: _DEFAULT_PARTITION = 'aws' _DEFAULT_DNS_SUFFIX = 'amazonaws.com' - def __init__(self, endpoint_resolver, region=None, - s3_config=None, endpoint_url=None, partition=None, - use_fips_endpoint=False): + def __init__( + self, + endpoint_resolver, + region=None, + s3_config=None, + endpoint_url=None, + partition=None, + use_fips_endpoint=False, + ): # This is calling the endpoint_resolver in regions.py self._endpoint_resolver = endpoint_resolver self._region = region @@ -1732,7 +1801,7 @@ class S3EndpointSetter(object): event_emitter.register('choose-signer.s3', self.set_signer) event_emitter.register( 'before-call.s3.WriteGetObjectResponse', - self.update_endpoint_to_s3_object_lambda + self.update_endpoint_to_s3_object_lambda, ) def update_endpoint_to_s3_object_lambda(self, params, context, **kwargs): @@ -1748,7 +1817,9 @@ class S3EndpointSetter(object): resolver = self._endpoint_resolver # Constructing endpoints as s3-object-lambda as region - resolved = resolver.construct_endpoint('s3-object-lambda', self._region) + resolved = resolver.construct_endpoint( + 's3-object-lambda', self._region + ) # Ideally we would be able to replace the endpoint before # serialization but there's no event to do that currently @@ -1766,9 +1837,9 @@ class S3EndpointSetter(object): self._validate_fips_supported(request) self._validate_global_regions(request) region_name = self._resolve_region_for_accesspoint_endpoint( - request) - self._resolve_signing_name_for_accesspoint_endpoint( - request) + request + ) + self._resolve_signing_name_for_accesspoint_endpoint(request) self._switch_to_accesspoint_endpoint(request, region_name) return if self._use_accelerate_endpoint: @@ -1792,16 +1863,14 @@ class S3EndpointSetter(object): return if 'fips' in request.context['s3_accesspoint']['region']: raise UnsupportedS3AccesspointConfigurationError( - msg={ - 'Invalid ARN, FIPS region not allowed in ARN.' - } + msg={'Invalid ARN, FIPS region not allowed in ARN.'} ) if 'outpost_name' in request.context['s3_accesspoint']: raise UnsupportedS3AccesspointConfigurationError( msg=( 'Client is configured to use the FIPS psuedo-region "%s", ' - 'but outpost ARNs do not support FIPS endpoints.' % ( - self._region) + 'but outpost ARNs do not support FIPS endpoints.' 
+ % (self._region) ) ) # Transforming psuedo region to actual region @@ -1816,8 +1885,8 @@ class S3EndpointSetter(object): 'for "%s", but the access-point ARN provided is for ' 'the "%s" region. For clients using a FIPS ' 'psuedo-region calls to access-point ARNs in another ' - 'region are not allowed.' % (self._region, - accesspoint_region) + 'region are not allowed.' + % (self._region, accesspoint_region) ) ) @@ -1847,12 +1916,14 @@ class S3EndpointSetter(object): msg=( 'Client is configured for "%s" partition, but access-point' ' ARN provided is for "%s" partition. The client and ' - ' access-point partition must be the same.' % ( - self._partition, request_partition) + ' access-point partition must be the same.' + % (self._partition, request_partition) ) ) s3_service = request.context['s3_accesspoint'].get('service') - if s3_service == 's3-object-lambda' and self._s3_config.get('use_dualstack_endpoint'): + if s3_service == 's3-object-lambda' and self._s3_config.get( + 'use_dualstack_endpoint' + ): raise UnsupportedS3AccesspointConfigurationError( msg=( 'Client does not support s3 dualstack configuration ' @@ -1917,16 +1988,20 @@ class S3EndpointSetter(object): def _switch_to_accesspoint_endpoint(self, request, region_name): original_components = urlsplit(request.url) - accesspoint_endpoint = urlunsplit(( - original_components.scheme, - self._get_netloc(request.context, region_name), - self._get_accesspoint_path( - original_components.path, request.context), - original_components.query, - '' - )) + accesspoint_endpoint = urlunsplit( + ( + original_components.scheme, + self._get_netloc(request.context, region_name), + self._get_accesspoint_path( + original_components.path, request.context + ), + original_components.query, + '', + ) + ) logger.debug( - 'Updating URI from %s to %s' % (request.url, accesspoint_endpoint)) + f'Updating URI from {request.url} to {accesspoint_endpoint}' + ) request.url = accesspoint_endpoint def _get_netloc(self, request_context, region_name): @@ -1938,9 +2013,7 @@ class S3EndpointSetter(object): def _get_mrap_netloc(self, request_context): s3_accesspoint = request_context['s3_accesspoint'] region_name = 's3-global' - mrap_netloc_components = [ - s3_accesspoint['name'] - ] + mrap_netloc_components = [s3_accesspoint['name']] if self._endpoint_url: endpoint_url_netloc = urlsplit(self._endpoint_url).netloc mrap_netloc_components.append(endpoint_url_netloc) @@ -1950,7 +2023,7 @@ class S3EndpointSetter(object): [ 'accesspoint', region_name, - self._get_partition_dns_suffix(partition) + self._get_partition_dns_suffix(partition), ] ) return '.'.join(mrap_netloc_components) @@ -1958,7 +2031,7 @@ class S3EndpointSetter(object): def _get_accesspoint_netloc(self, request_context, region_name): s3_accesspoint = request_context['s3_accesspoint'] accesspoint_netloc_components = [ - '%s-%s' % (s3_accesspoint['name'], s3_accesspoint['account']), + '{}-{}'.format(s3_accesspoint['name'], s3_accesspoint['account']), ] outpost_name = s3_accesspoint.get('outpost_name') if self._endpoint_url: @@ -1972,19 +2045,18 @@ class S3EndpointSetter(object): accesspoint_netloc_components.extend(outpost_host) elif s3_accesspoint['service'] == 's3-object-lambda': component = self._inject_fips_if_needed( - 's3-object-lambda', request_context) + 's3-object-lambda', request_context + ) accesspoint_netloc_components.append(component) else: component = self._inject_fips_if_needed( - 's3-accesspoint', request_context) + 's3-accesspoint', request_context + ) 
accesspoint_netloc_components.append(component) if self._s3_config.get('use_dualstack_endpoint'): accesspoint_netloc_components.append('dualstack') accesspoint_netloc_components.extend( - [ - region_name, - self._get_dns_suffix(region_name) - ] + [region_name, self._get_dns_suffix(region_name)] ) return '.'.join(accesspoint_netloc_components) @@ -2012,7 +2084,8 @@ class S3EndpointSetter(object): def _get_dns_suffix(self, region_name): resolved = self._endpoint_resolver.construct_endpoint( - 's3', region_name) + 's3', region_name + ) dns_suffix = self._DEFAULT_DNS_SUFFIX if resolved and 'dnsSuffix' in resolved: dns_suffix = resolved['dnsSuffix'] @@ -2099,21 +2172,29 @@ class S3EndpointSetter(object): logger.debug("Using S3 path style addressing.") return None - logger.debug("Defaulting to S3 virtual host style addressing with " - "path style addressing fallback.") + logger.debug( + "Defaulting to S3 virtual host style addressing with " + "path style addressing fallback." + ) # By default, try to use virtual style with path fallback. return fix_s3_host -class S3ControlEndpointSetter(object): +class S3ControlEndpointSetter: _DEFAULT_PARTITION = 'aws' _DEFAULT_DNS_SUFFIX = 'amazonaws.com' _HOST_LABEL_REGEX = re.compile(r'^[a-zA-Z0-9\-]{1,63}$') - def __init__(self, endpoint_resolver, region=None, - s3_config=None, endpoint_url=None, partition=None, - use_fips_endpoint=False): + def __init__( + self, + endpoint_resolver, + region=None, + s3_config=None, + endpoint_url=None, + partition=None, + use_fips_endpoint=False, + ): self._endpoint_resolver = endpoint_resolver self._region = region self._s3_config = s3_config @@ -2151,9 +2232,7 @@ class S3ControlEndpointSetter(object): if 'fips' in request.context['arn_details']['region']: raise UnsupportedS3ControlArnError( arn=request.context['arn_details']['original'], - msg={ - 'Invalid ARN, FIPS region not allowed in ARN.' - } + msg='Invalid ARN, FIPS region not allowed in ARN.', ) if not self._s3_config.get('use_arn_region', False): arn_region = request.context['arn_details']['region'] @@ -2170,8 +2249,8 @@ class S3ControlEndpointSetter(object): msg=( 'Client is configured for "%s" partition, but arn ' 'provided is for "%s" partition. The client and ' - 'arn partition must be the same.' % ( - self._partition, request_partion) + 'arn partition must be the same.' 
+ % (self._partition, request_partion) ) ) if self._s3_config.get('use_accelerate_endpoint'): @@ -2205,20 +2284,24 @@ class S3ControlEndpointSetter(object): return arn_service def _resolve_endpoint_from_arn_details(self, request, region_name): - new_netloc = self._resolve_netloc_from_arn_details(request, region_name) + new_netloc = self._resolve_netloc_from_arn_details( + request, region_name + ) self._update_request_netloc(request, new_netloc) def _update_request_netloc(self, request, new_netloc): original_components = urlsplit(request.url) - arn_details_endpoint = urlunsplit(( - original_components.scheme, - new_netloc, - original_components.path, - original_components.query, - '' - )) + arn_details_endpoint = urlunsplit( + ( + original_components.scheme, + new_netloc, + original_components.path, + original_components.query, + '', + ) + ) logger.debug( - 'Updating URI from %s to %s' % (request.url, arn_details_endpoint) + f'Updating URI from {request.url} to {arn_details_endpoint}' ) request.url = arn_details_endpoint @@ -2278,7 +2361,8 @@ class S3ControlEndpointSetter(object): def _get_dns_suffix(self, region_name): resolved = self._endpoint_resolver.construct_endpoint( - 's3', region_name) + 's3', region_name + ) dns_suffix = self._DEFAULT_DNS_SUFFIX if resolved and 'dnsSuffix' in resolved: dns_suffix = resolved['dnsSuffix'] @@ -2312,7 +2396,7 @@ class S3ControlEndpointSetter(object): request.headers['x-amz-outpost-id'] = outpost_name -class S3ControlArnParamHandler(object): +class S3ControlArnParamHandler: _RESOURCE_SPLIT_REGEX = re.compile(r'[/:]') def __init__(self, arn_parser=None): @@ -2435,7 +2519,7 @@ class S3ControlArnParamHandler(object): context['arn_details'] = arn_details -class ContainerMetadataFetcher(object): +class ContainerMetadataFetcher: TIMEOUT_SECONDS = 2 RETRY_ATTEMPTS = 3 @@ -2465,13 +2549,13 @@ class ContainerMetadataFetcher(object): def _validate_allowed_url(self, full_url): parsed = botocore.compat.urlparse(full_url) - is_whitelisted_host = self._check_if_whitelisted_host( - parsed.hostname) + is_whitelisted_host = self._check_if_whitelisted_host(parsed.hostname) if not is_whitelisted_host: raise ValueError( "Unsupported host '%s'. 
Can only " - "retrieve metadata from these hosts: %s" % - (parsed.hostname, ', '.join(self._ALLOWED_HOSTS))) + "retrieve metadata from these hosts: %s" + % (parsed.hostname, ', '.join(self._ALLOWED_HOSTS)) + ) def _check_if_whitelisted_host(self, host): if host in self._ALLOWED_HOSTS: @@ -2498,10 +2582,15 @@ class ContainerMetadataFetcher(object): while True: try: return self._get_response( - full_url, headers, self.TIMEOUT_SECONDS) + full_url, headers, self.TIMEOUT_SECONDS + ) except MetadataRetrievalError as e: - logger.debug("Received error when attempting to retrieve " - "container metadata: %s", e, exc_info=True) + logger.debug( + "Received error when attempting to retrieve " + "container metadata: %s", + e, + exc_info=True, + ) self._sleep(self.SLEEP_TIME) attempts += 1 if attempts >= self.RETRY_ATTEMPTS: @@ -2517,7 +2606,9 @@ class ContainerMetadataFetcher(object): raise MetadataRetrievalError( error_msg=( "Received non 200 response (%s) from ECS metadata: %s" - ) % (response.status_code, response_text)) + ) + % (response.status_code, response_text) + ) try: return json.loads(response_text) except ValueError: @@ -2527,12 +2618,14 @@ class ContainerMetadataFetcher(object): logger.debug('%s:%s', error_msg, response_text) raise MetadataRetrievalError(error_msg=error_msg) except RETRYABLE_HTTP_ERRORS as e: - error_msg = ("Received error when attempting to retrieve " - "ECS metadata: %s" % e) + error_msg = ( + "Received error when attempting to retrieve " + "ECS metadata: %s" % e + ) raise MetadataRetrievalError(error_msg=error_msg) def full_url(self, relative_uri): - return 'http://%s%s' % (self.IP_ADDRESS, relative_uri) + return f'http://{self.IP_ADDRESS}{relative_uri}' def get_environ_proxies(url): @@ -2654,7 +2747,7 @@ def conditionally_calculate_md5(params, **kwargs): params['headers']['Content-MD5'] = md5_digest -class FileWebIdentityTokenLoader(object): +class FileWebIdentityTokenLoader: def __init__(self, web_identity_token_path, _open=open): self._web_identity_token_path = web_identity_token_path self._open = _open @@ -2664,7 +2757,7 @@ class FileWebIdentityTokenLoader(object): return token_file.read() -class SSOTokenLoader(object): +class SSOTokenLoader: def __init__(self, cache=None): if cache is None: cache = {} @@ -2691,12 +2784,7 @@ class EventbridgeSignerSetter: _DEFAULT_PARTITION = 'aws' _DEFAULT_DNS_SUFFIX = 'amazonaws.com' - def __init__( - self, - endpoint_resolver, - region=None, - endpoint_url=None - ): + def __init__(self, endpoint_resolver, region=None, endpoint_url=None): self._endpoint_resolver = endpoint_resolver self._region = region self._endpoint_url = endpoint_url @@ -2704,19 +2792,16 @@ class EventbridgeSignerSetter: def register(self, event_emitter): event_emitter.register( 'before-parameter-build.events.PutEvents', - self.check_for_global_endpoint + self.check_for_global_endpoint, ) event_emitter.register( - 'before-call.events.PutEvents', - self.set_endpoint_url + 'before-call.events.PutEvents', self.set_endpoint_url ) def set_endpoint_url(self, params, context, **kwargs): if 'eventbridge_endpoint' in context: endpoint = context['eventbridge_endpoint'] - logger.debug( - f"Rewriting URL from {params['url']} to {endpoint}" - ) + logger.debug(f"Rewriting URL from {params['url']} to {endpoint}") params['url'] = endpoint def check_for_global_endpoint(self, params, context, **kwargs): @@ -2732,8 +2817,8 @@ class EventbridgeSignerSetter: if not HAS_CRT: raise MissingDependencyException( msg="Using EndpointId requires an additional " - "dependency. 
You will need to pip install " - "botocore[crt] before proceeding." + "dependency. You will need to pip install " + "botocore[crt] before proceeding." ) config = context.get('client_config') @@ -2755,8 +2840,7 @@ class EventbridgeSignerSetter: msg='EndpointId is not a valid hostname component.' ) resolved_endpoint = self._get_global_endpoint( - endpoint, - endpoint_variant_tags=endpoint_variant_tags + endpoint, endpoint_variant_tags=endpoint_variant_tags ) else: resolved_endpoint = self._endpoint_url @@ -2771,8 +2855,7 @@ class EventbridgeSignerSetter: if partition is None: partition = self._DEFAULT_PARTITION dns_suffix = resolver.get_partition_dns_suffix( - partition, - endpoint_variant_tags=endpoint_variant_tags + partition, endpoint_variant_tags=endpoint_variant_tags ) if dns_suffix is None: dns_suffix = self._DEFAULT_DNS_SUFFIX diff --git a/contrib/python/botocore/py3/botocore/validate.py b/contrib/python/botocore/py3/botocore/validate.py index 8e03b8e2ce..4ba6744fe0 100644 --- a/contrib/python/botocore/py3/botocore/validate.py +++ b/contrib/python/botocore/py3/botocore/validate.py @@ -17,7 +17,6 @@ import decimal import json from datetime import datetime -from botocore.compat import six from botocore.exceptions import ParamValidationError from botocore.utils import is_json_value_header, parse_to_aware_datetime @@ -56,13 +55,18 @@ def type_check(valid_types): def _type_check(param, errors, name): if not isinstance(param, valid_types): - valid_type_names = [six.text_type(t) for t in valid_types] - errors.report(name, 'invalid type', param=param, - valid_types=valid_type_names) + valid_type_names = [str(t) for t in valid_types] + errors.report( + name, + 'invalid type', + param=param, + valid_types=valid_type_names, + ) return False return True return _on_passes_type_check + return _create_type_check_guard @@ -83,7 +87,7 @@ def range_check(name, value, shape, error_type, errors): errors.report(name, error_type, param=value, min_allowed=min_allowed) -class ValidationErrors(object): +class ValidationErrors: def __init__(self): self._errors = [] @@ -103,66 +107,63 @@ class ValidationErrors(object): name = self._get_name(name) if error_type == 'missing required field': return ( - 'Missing required parameter in %s: "%s"' % ( - name, additional['required_name'] - ) + f"Missing required parameter in {name}: " + f"\"{additional['required_name']}\"" ) elif error_type == 'unknown field': + unknown_param = additional['unknown_param'] + valid_names = ', '.join(additional['valid_names']) return ( - 'Unknown parameter in %s: "%s", must be one of: %s' % ( - name, additional['unknown_param'], - ', '.join(additional['valid_names']) - ) + f'Unknown parameter in {name}: "{unknown_param}", ' + f'must be one of: {valid_names}' ) elif error_type == 'invalid type': + param = additional['param'] + param_type = type(param) + valid_types = ', '.join(additional['valid_types']) return ( - 'Invalid type for parameter %s, value: %s, type: %s, ' - 'valid types: %s' % ( - name, additional['param'], - str(type(additional['param'])), - ', '.join(additional['valid_types']) - ) + f'Invalid type for parameter {name}, value: {param}, ' + f'type: {param_type}, valid types: {valid_types}' ) elif error_type == 'invalid range': + param = additional['param'] min_allowed = additional['min_allowed'] return ( - 'Invalid value for parameter %s, value: %s, valid min value: ' - '%s' % (name, additional['param'], min_allowed) + f'Invalid value for parameter {name}, value: {param}, ' + f'valid min value: {min_allowed}' ) elif error_type 
== 'invalid length': + param = additional['param'] min_allowed = additional['min_allowed'] return ( - 'Invalid length for parameter %s, value: %s, ' - 'valid min length: %s' % ( - name, additional['param'], min_allowed - ) + f'Invalid length for parameter {name}, value: {param}, ' + f'valid min length: {min_allowed}' ) elif error_type == 'unable to encode to json': - return ( - 'Invalid parameter %s must be json serializable: %s' % ( - name, additional['type_error'] - ) + return 'Invalid parameter {} must be json serializable: {}'.format( + name, + additional['type_error'], ) elif error_type == 'invalid type for document': + param = additional['param'] + param_type = type(param) + valid_types = ', '.join(additional['valid_types']) return ( - 'Invalid type for document parameter %s, value: %s, type: %s, ' - 'valid types: %s' % ( - name, - additional['param'], - str(type(additional['param'])), - ', '.join(additional['valid_types']) - ) + f'Invalid type for document parameter {name}, value: {param}, ' + f'type: {param_type}, valid types: {valid_types}' ) elif error_type == 'more than one input': + members = ', '.join(additional['members']) return ( - 'Invalid number of parameters set for tagged union structure ' - '%s. Can only set one of the following keys: ' - '%s.' % (name, '. '.join(additional['members'])) + f'Invalid number of parameters set for tagged union structure ' + f'{name}. Can only set one of the following keys: ' + f'{members}.' ) elif error_type == 'empty input': + members = ', '.join(additional['members']) return ( - 'Must set one of the following keys for tagged union' - 'structure %s: %s.' % (name, '. '.join(additional['members'])) + f'Must set one of the following keys for tagged union' + f'structure {name}: {members}.' ) def _get_name(self, name): @@ -177,7 +178,7 @@ class ValidationErrors(object): self._errors.append((reason, name, kwargs)) -class ParamValidator(object): +class ParamValidator: """Validates parameters against a shape model.""" def validate(self, params, shape): @@ -210,7 +211,8 @@ class ParamValidator(object): special_validator(params, shape, errors, name) else: getattr(self, '_validate_%s' % shape.type_name)( - params, shape, errors, name) + params, shape, errors, name + ) def _validate_jsonvalue_string(self, params, shape, errors, name): # Check to see if a value marked as a jsonvalue can be dumped to @@ -229,15 +231,19 @@ class ParamValidator(object): self._validate_document(params[key], shape, errors, key) elif isinstance(params, list): for index, entity in enumerate(params): - self._validate_document(entity, shape, errors, - '%s[%d]' % (name, index)) - elif not isinstance(params, (six.string_types, int, bool, float)): + self._validate_document( + entity, shape, errors, '%s[%d]' % (name, index) + ) + elif not isinstance(params, ((str,), int, bool, float)): valid_types = (str, int, bool, float, list, dict) - valid_type_names = [six.text_type(t) for t in valid_types] - errors.report(name, 'invalid type for document', - param=params, - param_type=type(params), - valid_types=valid_type_names) + valid_type_names = [str(t) for t in valid_types] + errors.report( + name, + 'invalid type for document', + param=params, + param_type=type(params), + valid_types=valid_type_names, + ) @type_check(valid_types=(dict,)) def _validate_structure(self, params, shape, errors, name): @@ -252,23 +258,35 @@ class ParamValidator(object): # Validate required fields. 
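
ParamValidator can also be driven directly against a service's input shape, which is a handy way to observe the structure walk below. A minimal sketch, assuming botocore is installed and using S3's PutObject shape purely for illustration::

    from botocore.session import Session
    from botocore.validate import ParamValidator

    model = Session().get_service_model('s3')
    shape = model.operation_model('PutObject').input_shape
    report = ParamValidator().validate({'Bucket': 'example-bucket'}, shape)
    if report.has_errors():
        print(report.generate_report())  # reports the missing required "Key"
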
for required_member in shape.metadata.get('required', []): if required_member not in params: - errors.report(name, 'missing required field', - required_name=required_member, user_params=params) + errors.report( + name, + 'missing required field', + required_name=required_member, + user_params=params, + ) members = shape.members known_params = [] # Validate known params. for param in params: if param not in members: - errors.report(name, 'unknown field', unknown_param=param, - valid_names=list(members)) + errors.report( + name, + 'unknown field', + unknown_param=param, + valid_names=list(members), + ) else: known_params.append(param) # Validate structure members. for param in known_params: - self._validate(params[param], shape.members[param], - errors, '%s.%s' % (name, param)) + self._validate( + params[param], + shape.members[param], + errors, + f'{name}.{param}', + ) - @type_check(valid_types=six.string_types) + @type_check(valid_types=(str,)) def _validate_string(self, param, shape, errors, name): # Validate range. For a string, the min/max contraints # are of the string length. @@ -285,43 +303,45 @@ class ParamValidator(object): member_shape = shape.member range_check(name, len(param), shape, 'invalid length', errors) for i, item in enumerate(param): - self._validate(item, member_shape, errors, '%s[%s]' % (name, i)) + self._validate(item, member_shape, errors, f'{name}[{i}]') @type_check(valid_types=(dict,)) def _validate_map(self, param, shape, errors, name): key_shape = shape.key value_shape = shape.value for key, value in param.items(): - self._validate(key, key_shape, errors, "%s (key: %s)" - % (name, key)) - self._validate(value, value_shape, errors, '%s.%s' % (name, key)) + self._validate(key, key_shape, errors, f"{name} (key: {key})") + self._validate(value, value_shape, errors, f'{name}.{key}') - @type_check(valid_types=six.integer_types) + @type_check(valid_types=(int,)) def _validate_integer(self, param, shape, errors, name): range_check(name, param, shape, 'invalid range', errors) def _validate_blob(self, param, shape, errors, name): - if isinstance(param, (bytes, bytearray, six.text_type)): + if isinstance(param, (bytes, bytearray, str)): return elif hasattr(param, 'read'): # File like objects are also allowed for blob types. return else: - errors.report(name, 'invalid type', param=param, - valid_types=[str(bytes), str(bytearray), - 'file-like object']) + errors.report( + name, + 'invalid type', + param=param, + valid_types=[str(bytes), str(bytearray), 'file-like object'], + ) @type_check(valid_types=(bool,)) def _validate_boolean(self, param, shape, errors, name): pass - @type_check(valid_types=(float, decimal.Decimal) + six.integer_types) + @type_check(valid_types=(float, decimal.Decimal) + (int,)) def _validate_double(self, param, shape, errors, name): range_check(name, param, shape, 'invalid range', errors) _validate_float = _validate_double - @type_check(valid_types=six.integer_types) + @type_check(valid_types=(int,)) def _validate_long(self, param, shape, errors, name): range_check(name, param, shape, 'invalid range', errors) @@ -331,9 +351,10 @@ class ParamValidator(object): # object, or a string that parses to a datetime. 
is_valid_type = self._type_check_datetime(param) if not is_valid_type: - valid_type_names = [six.text_type(datetime), 'timestamp-string'] - errors.report(name, 'invalid type', param=param, - valid_types=valid_type_names) + valid_type_names = [str(datetime), 'timestamp-string'] + errors.report( + name, 'invalid type', param=param, valid_types=valid_type_names + ) def _type_check_datetime(self, value): try: @@ -345,7 +366,7 @@ class ParamValidator(object): return False -class ParamValidationDecorator(object): +class ParamValidationDecorator: def __init__(self, param_validator, serializer): self._param_validator = param_validator self._serializer = serializer @@ -353,9 +374,11 @@ class ParamValidationDecorator(object): def serialize_to_request(self, parameters, operation_model): input_shape = operation_model.input_shape if input_shape is not None: - report = self._param_validator.validate(parameters, - operation_model.input_shape) + report = self._param_validator.validate( + parameters, operation_model.input_shape + ) if report.has_errors(): raise ParamValidationError(report=report.generate_report()) - return self._serializer.serialize_to_request(parameters, - operation_model) + return self._serializer.serialize_to_request( + parameters, operation_model + ) diff --git a/contrib/python/botocore/py3/botocore/waiter.py b/contrib/python/botocore/py3/botocore/waiter.py index 5e1dd634de..2362eebeda 100644 --- a/contrib/python/botocore/py3/botocore/waiter.py +++ b/contrib/python/botocore/py3/botocore/waiter.py @@ -45,7 +45,8 @@ def create_waiter_with_client(waiter_name, waiter_model, client): single_waiter_config = waiter_model.get_waiter(waiter_name) operation_name = xform_name(single_waiter_config.operation) operation_method = NormalizedOperationMethod( - getattr(client, operation_name)) + getattr(client, operation_name) + ) # Create a new wait method that will serve as a proxy to the underlying # Waiter.wait method. This is needed to attach a docstring to the @@ -58,17 +59,17 @@ def create_waiter_with_client(waiter_name, waiter_model, client): event_emitter=client.meta.events, service_model=client.meta.service_model, service_waiter_model=waiter_model, - include_signature=False + include_signature=False, ) # Rename the waiter class based on the type of waiter. - waiter_class_name = str('%s.Waiter.%s' % ( - get_service_module_name(client.meta.service_model), - waiter_name)) + waiter_class_name = str( + '%s.Waiter.%s' + % (get_service_module_name(client.meta.service_model), waiter_name) + ) # Create the new waiter class - documented_waiter_cls = type( - waiter_class_name, (Waiter,), {'wait': wait}) + documented_waiter_cls = type(waiter_class_name, (Waiter,), {'wait': wait}) # Return an instance of the new waiter class. 
return documented_waiter_cls( @@ -83,7 +84,7 @@ def is_valid_waiter_error(response): return False -class NormalizedOperationMethod(object): +class NormalizedOperationMethod: def __init__(self, client_method): self._client_method = client_method @@ -94,7 +95,7 @@ return e.response -class WaiterModel(object): +class WaiterModel: SUPPORTED_VERSION = 2 def __init__(self, waiter_config): @@ -124,10 +125,12 @@ def _verify_supported_version(self, version): if version != self.SUPPORTED_VERSION: raise WaiterConfigError( - error_msg=("Unsupported waiter version, supported version " - "must be: %s, but version of waiter config " - "is: %s" % (self.SUPPORTED_VERSION, - version))) + error_msg=( + "Unsupported waiter version, supported version " + "must be: %s, but version of waiter config " + "is: %s" % (self.SUPPORTED_VERSION, version) + ) + ) def get_waiter(self, waiter_name): try: @@ -137,13 +140,14 @@ return SingleWaiterConfig(single_waiter_config) -class SingleWaiterConfig(object): +class SingleWaiterConfig: """Represents the waiter configuration for a single waiter. A single waiter is considered the configuration for a single value associated with a named waiter (i.e. TableExists). """ + def __init__(self, single_waiter_config): self._config = single_waiter_config @@ -163,7 +167,7 @@ return acceptors -class AcceptorConfig(object): +class AcceptorConfig: def __init__(self, config): self.state = config['state'] self.matcher = config['matcher'] @@ -174,17 +178,28 @@ @property def explanation(self): if self.matcher == 'path': - return 'For expression "%s" we matched expected path: "%s"' % (self.argument, self.expected) + return 'For expression "{}" we matched expected path: "{}"'.format( + self.argument, + self.expected, + ) elif self.matcher == 'pathAll': - return 'For expression "%s" all members matched expected path: "%s"' % (self.argument, self.expected) + return ( + 'For expression "%s" all members matched expected path: "%s"' + % (self.argument, self.expected) + ) elif self.matcher == 'pathAny': - return 'For expression "%s" we matched expected path: "%s" at least once' % (self.argument, self.expected) + return ( + 'For expression "%s" we matched expected path: "%s" at least once' + % (self.argument, self.expected) + ) elif self.matcher == 'status': return 'Matched expected HTTP status code: %s' % self.expected elif self.matcher == 'error': return 'Matched expected service error code: %s' % self.expected else: - return 'No explanation for unknown waiter type: "%s"' % self.matcher + return ( + 'No explanation for unknown waiter type: "%s"' % self.matcher + ) def _create_matcher_func(self): # An acceptor function is a callable that takes a single value.
The @@ -207,7 +222,8 @@ class AcceptorConfig(object): return self._create_error_matcher() else: raise WaiterConfigError( - error_msg="Unknown acceptor: %s" % self.matcher) + error_msg="Unknown acceptor: %s" % self.matcher + ) def _create_path_matcher(self): expression = jmespath.compile(self.argument) @@ -217,6 +233,7 @@ class AcceptorConfig(object): if is_valid_waiter_error(response): return return expression.search(response) == expected + return acceptor_matches def _create_path_all_matcher(self): @@ -237,6 +254,7 @@ class AcceptorConfig(object): if element != expected: return False return True + return acceptor_matches def _create_path_any_matcher(self): @@ -257,6 +275,7 @@ class AcceptorConfig(object): if element == expected: return True return False + return acceptor_matches def _create_status_matcher(self): @@ -267,8 +286,10 @@ class AcceptorConfig(object): # other than it is a dict, so we don't assume there's # a ResponseMetadata.HTTPStatusCode. status_code = response.get('ResponseMetadata', {}).get( - 'HTTPStatusCode') + 'HTTPStatusCode' + ) return status_code == expected + return acceptor_matches def _create_error_matcher(self): @@ -282,10 +303,11 @@ class AcceptorConfig(object): # of an error response will contain the "Error" and # "ResponseMetadata" key. return response.get("Error", {}).get("Code", "") == expected + return acceptor_matches -class Waiter(object): +class Waiter: def __init__(self, name, config, operation_method): """ @@ -334,15 +356,17 @@ class Waiter(object): # can just handle here by raising an exception. raise WaiterError( name=self.name, - reason='An error occurred (%s): %s' % ( + reason='An error occurred (%s): %s' + % ( response['Error'].get('Code', 'Unknown'), response['Error'].get('Message', 'Unknown'), ), last_response=response, ) if current_state == 'success': - logger.debug("Waiting complete, waiter matched the " - "success state.") + logger.debug( + "Waiting complete, waiter matched the " "success state." + ) return if current_state == 'failure': reason = 'Waiter encountered a terminal failure state: %s' % ( @@ -357,8 +381,9 @@ class Waiter(object): if last_matched_acceptor is None: reason = 'Max attempts exceeded' else: - reason = 'Max attempts exceeded. Previously accepted state: %s' % ( - acceptor.explanation + reason = ( + 'Max attempts exceeded. 
Previously accepted state: %s' + % (acceptor.explanation) ) raise WaiterError( name=self.name, diff --git a/contrib/python/botocore/py3/patches/01-unvendor-six.patch b/contrib/python/botocore/py3/patches/01-unvendor-six.patch index b7eadde22c..6473228490 100644 --- a/contrib/python/botocore/py3/patches/01-unvendor-six.patch +++ b/contrib/python/botocore/py3/patches/01-unvendor-six.patch @@ -6,11 +6,6 @@ @@ -36,1 +36,1 @@ logger = logging.getLogger(__name__) -from botocore.vendored.six.moves import http_client +from six.moves import http_client ---- contrib/python/botocore/py3/botocore/endpoint.py (index) -+++ contrib/python/botocore/py3/botocore/endpoint.py (working tree) -@@ -35,1 +35,1 @@ import logging --from botocore.vendored import six -+from botocore.compat import six --- contrib/python/botocore/py3/botocore/httpsession.py (index) +++ contrib/python/botocore/py3/botocore/httpsession.py (working tree) @@ -62,1 +62,1 @@ except ImportError: @@ -18,6 +13,6 @@ +from six.moves.urllib_parse import unquote --- contrib/python/botocore/py3/botocore/utils.py (index) +++ contrib/python/botocore/py3/botocore/utils.py (working tree) -@@ -85,1 +85,1 @@ from botocore.compat import ( +@@ -84,1 +84,1 @@ from botocore.compat import ( -from botocore.vendored.six.moves.urllib.request import getproxies, proxy_bypass +from six.moves.urllib.request import getproxies, proxy_bypass diff --git a/contrib/python/botocore/py3/patches/02-fix-for-arcadia.patch b/contrib/python/botocore/py3/patches/02-fix-for-arcadia.patch index 276cacb398..b7d6092b5b 100644 --- a/contrib/python/botocore/py3/patches/02-fix-for-arcadia.patch +++ b/contrib/python/botocore/py3/patches/02-fix-for-arcadia.patch @@ -1,6 +1,6 @@ --- contrib/python/botocore/py3/botocore/data/endpoints.json (index) +++ contrib/python/botocore/py3/botocore/data/endpoints.json (working tree) -@@ -18584,6 +18584,46 @@ +@@ -18857,6 +18857,46 @@ } } } @@ -91,7 +91,7 @@ logger = logging.getLogger(__name__) -@@ -174,6 +177,51 @@ class JSONFileLoader(object): +@@ -177,6 +180,51 @@ class JSONFileLoader(object): return json.loads(payload, object_pairs_hook=OrderedDict) @@ -143,16 +143,16 @@ def create_loader(search_path_string=None): """Create a Loader class. -@@ -207,7 +255,7 @@ class Loader(object): - convenience method over ``load_data`` and ``determine_latest_version``. +@@ -211,7 +259,7 @@ class Loader(object): """ + - FILE_LOADER_CLASS = JSONFileLoader + FILE_LOADER_CLASS = HybridJsonLoader # The included models in botocore/data/ that we ship with botocore. BUILTIN_DATA_PATH = os.path.join(BOTOCORE_ROOT, 'data') # For convenience we automatically add ~/.aws/models to the data path. -@@ -283,6 +331,11 @@ class Loader(object): +@@ -296,6 +344,11 @@ class Loader(object): if self.file_loader.exists(full_load_path): services.add(service_name) break @@ -164,7 +164,7 @@ return sorted(services) @instance_cache -@@ -334,6 +387,11 @@ class Loader(object): +@@ -347,6 +400,11 @@ class Loader(object): # to the type_name passed in. 
if self.file_loader.exists(full_path): known_api_versions.add(dirname) @@ -176,7 +176,7 @@ if not known_api_versions: raise DataNotFoundError(data_path=service_name) return sorted(known_api_versions) -@@ -419,6 +477,12 @@ class Loader(object): +@@ -434,6 +492,12 @@ class Loader(object): found = self.file_loader.load_file(possible_path) if found is not None: return found diff --git a/contrib/python/botocore/py3/patches/03-extended-listings.patch b/contrib/python/botocore/py3/patches/03-extended-listings.patch index f40ec23c95..b300046dca 100644 --- a/contrib/python/botocore/py3/patches/03-extended-listings.patch +++ b/contrib/python/botocore/py3/patches/03-extended-listings.patch @@ -154,7 +154,7 @@ date: 2021-10-27T00:34:16+03:00 "members":{ --- contrib/python/botocore/py3/botocore/handlers.py (73e85ed9c4be248886f10db6b7d83f27d446916c) +++ contrib/python/botocore/py3/botocore/handlers.py (cc02b99f18408f40978b56abfd8e5e9b2c2492d7) -@@ -763,6 +763,27 @@ def decode_list_object_v2(parsed, context, **kwargs): +@@ -800,6 +800,27 @@ def decode_list_object_v2(parsed, context, **kwargs): ) @@ -182,7 +182,7 @@ date: 2021-10-27T00:34:16+03:00 def decode_list_object_versions(parsed, context, **kwargs): # From the documentation: If you specify encoding-type request parameter, # Amazon S3 includes this element in the response, and returns encoded key -@@ -1059,6 +1080,7 @@ BUILTIN_HANDLERS = [ +@@ -1118,6 +1139,7 @@ BUILTIN_HANDLERS = [ ('before-parameter-build.glacier', inject_account_id), ('after-call.s3.ListObjects', decode_list_object), ('after-call.s3.ListObjectsV2', decode_list_object_v2), diff --git a/contrib/restricted/aws/s2n/.yandex_meta/devtools.licenses.report b/contrib/restricted/aws/s2n/.yandex_meta/devtools.licenses.report index 77b0c78928..efc7a692bf 100644 --- a/contrib/restricted/aws/s2n/.yandex_meta/devtools.licenses.report +++ b/contrib/restricted/aws/s2n/.yandex_meta/devtools.licenses.report @@ -171,6 +171,7 @@ BELONGS ya.make Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0 Files with this license: README.md [3:3] + api/s2n.h [20:20] KEEP Brian-Gladman-3-Clause 5c633a0224a7fbc49111f08904d03f31 BELONGS ya.make @@ -239,6 +240,17 @@ BELONGS ya.make pq-crypto/kyber_90s_r2/sha2.h [1:1] pq-crypto/kyber_r2/fips202_kyber_r2.h [1:1] +KEEP Apache-2.0 8824d81a8477cf3e444616f476257401 +BELONGS ya.make + Note: matched license text is too long. Read it in the source files. + Scancode info: + Original SPDX id: Apache-2.0 + Score : 100.00 + Match type : NOTICE + Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0 + Files with this license: + api/s2n.h [4:13] + KEEP MIT 9ff238e19a846d154b5a5b850ea25f0a BELONGS ya.make Note: matched license text is too long. Read it in the source files. 
@@ -285,7 +297,6 @@ BELONGS ya.make Match type : NOTICE Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0 Files with this license: - api/s2n.h [4:13] crypto/s2n_aead_cipher_aes_gcm.c [4:13] crypto/s2n_aead_cipher_chacha20_poly1305.c [4:13] crypto/s2n_cbc_cipher_3des.c [4:13] diff --git a/contrib/restricted/aws/s2n/CMakeLists.darwin.txt b/contrib/restricted/aws/s2n/CMakeLists.darwin.txt index 13baca35b6..83fac228fc 100644 --- a/contrib/restricted/aws/s2n/CMakeLists.darwin.txt +++ b/contrib/restricted/aws/s2n/CMakeLists.darwin.txt @@ -19,6 +19,7 @@ target_compile_options(restricted-aws-s2n PRIVATE -DS2N_KYBER512R3_AVX2_BMI2 -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX + -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4 -DS2N_MADVISE_SUPPORTED -DS2N___RESTRICT__SUPPORTED -DS2N_HAVE_EXECINFO diff --git a/contrib/restricted/aws/s2n/CMakeLists.linux.txt b/contrib/restricted/aws/s2n/CMakeLists.linux.txt index 02a667dbcb..f6225b6823 100644 --- a/contrib/restricted/aws/s2n/CMakeLists.linux.txt +++ b/contrib/restricted/aws/s2n/CMakeLists.linux.txt @@ -19,6 +19,7 @@ target_compile_options(restricted-aws-s2n PRIVATE -DS2N_KYBER512R3_AVX2_BMI2 -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX + -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4 -DS2N_MADVISE_SUPPORTED -DS2N___RESTRICT__SUPPORTED -DS2N_SIKE_P434_R3_ASM diff --git a/contrib/restricted/aws/s2n/api/s2n.h b/contrib/restricted/aws/s2n/api/s2n.h index ee89829512..3fbbc5d529 100644 --- a/contrib/restricted/aws/s2n/api/s2n.h +++ b/contrib/restricted/aws/s2n/api/s2n.h @@ -13,11 +13,24 @@ * permissions and limitations under the License. */ +/** + * @file s2n.h + * s2n-tls is a C99 implementation of the TLS/SSL protocols that is designed to + * be simple, small, fast, and with security as a priority. <br> It is released and + * licensed under the Apache License 2.0. + */ + #pragma once #if ((__GNUC__ >= 4) || defined(__clang__)) && defined(S2N_EXPORTS) +/** + * Marks a function as belonging to the public s2n API. + */ # define S2N_API __attribute__((visibility("default"))) #else +/** + * Marks a function as belonging to the public s2n API. + */ # define S2N_API #endif /* __GNUC__ >= 4 || defined(__clang__) */ @@ -31,35 +44,99 @@ extern "C" { #include <stdio.h> #include <sys/uio.h> -/* Function return code */ +/** + * Function return code + */ #define S2N_SUCCESS 0 +/** + * Function return code + */ #define S2N_FAILURE -1 -/* Callback return code */ +/** + * Callback return code + */ #define S2N_CALLBACK_BLOCKED -2 +/** + * s2n minimum supported TLS record major version + */ #define S2N_MINIMUM_SUPPORTED_TLS_RECORD_MAJOR_VERSION 2 + +/** + * s2n maximum supported TLS record major version + */ #define S2N_MAXIMUM_SUPPORTED_TLS_RECORD_MAJOR_VERSION 3 + +/** + * s2n SSL 2.0 Version Constant + */ #define S2N_SSLv2 20 + +/** + * s2n SSL 3.0 Version Constant + */ #define S2N_SSLv3 30 + +/** + * s2n TLS 1.0 Version Constant + */ #define S2N_TLS10 31 + +/** + * s2n TLS 1.1 Version Constant + */ #define S2N_TLS11 32 + +/** + * s2n TLS 1.2 Version Constant + */ #define S2N_TLS12 33 + +/** + * s2n TLS 1.3 Version Constant + */ #define S2N_TLS13 34 + +/** + * s2n Unknown TLS Version + */ #define S2N_UNKNOWN_PROTOCOL_VERSION 0 +/** + * s2n-tls functions that return 'int' return 0 to indicate success and -1 to indicate + * failure. + * + * s2n-tls functions that return pointer types return NULL in the case of + * failure. 
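These return-code conventions make it natural to wrap s2n-tls calls in a small checking helper on the application side. A minimal sketch (hypothetical application code, not part of this commit; `guard` is an illustrative name, and it uses s2n_strerror and s2n_errno, which are documented below):

    #include <stdio.h>
    #include <stdlib.h>
    #include <s2n.h>

    /* Abort with a readable message when an s2n-tls call that follows
     * the 0 / -1 return convention fails. */
    static void guard(int result, const char *what)
    {
        if (result != S2N_SUCCESS) {
            fprintf(stderr, "%s failed: %s\n", what, s2n_strerror(s2n_errno, "EN"));
            s2n_errno = S2N_ERR_T_OK; /* clear after handling, per the warning below */
            exit(EXIT_FAILURE);
        }
    }

    /* Usage: guard(s2n_init(), "s2n_init"); */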
+ * + * When an s2n-tls function returns a failure, s2n_errno will be set to a value + * corresponding to the error. This error value can be translated into a string + * explaining the error in English by calling s2n_strerror(s2n_errno, "EN"). + * A string containing a human-readable error name can be generated with `s2n_strerror_name`. + * A string containing internal debug information, including filename and line number, can be generated with `s2n_strerror_debug`. + * This string is useful to include when reporting issues to the s2n-tls development team. + * + * @warning To avoid possible confusion, s2n_errno should be cleared after processing an error: `s2n_errno = S2N_ERR_T_OK` + */ S2N_API extern __thread int s2n_errno; /** - * Returns the address of the thread-local `s2n_errno` variable - * * This function can be used instead of trying to resolve `s2n_errno` directly * in runtimes where thread-local variables may not be easily accessible. + * + * @returns The address of the thread-local `s2n_errno` variable */ S2N_API extern int *s2n_errno_location(void); +/** + * Used to help applications determine why an s2n-tls function failed. + * + * This enum is optimized for use in C switch statements. Each value in the enum represents + * an error "category". + */ typedef enum { S2N_ERR_T_OK=0, S2N_ERR_T_IO, @@ -71,16 +148,35 @@ typedef enum { S2N_ERR_T_USAGE } s2n_error_type; +/** + * Gets the category of error from an error. + * + * s2n-tls organizes errors into different "types" to allow applications to do logic on error values without catching all possibilities. + * Applications using non-blocking I/O should check error type to determine if the I/O operation failed because + * it would block or for some other error. + * + * @param error The error from s2n. Usually this is `s2n_errno`. + * @returns An s2n_error_type + */ S2N_API extern int s2n_error_get_type(int error); +/** + * An opaque configuration object, used by clients and servers for holding cryptographic certificates, keys and preferences. + */ struct s2n_config; + +/** + * An opaque connection. Used to track each s2n connection. + */ struct s2n_connection; /** * Prevents S2N from calling `OPENSSL_crypto_init`/`OPENSSL_cleanup`/`EVP_cleanup` on OpenSSL versions * prior to 1.1.x. This allows applications or languages that also init OpenSSL to interoperate * with S2N. + * + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_crypto_disable_init(void); @@ -88,81 +184,361 @@ extern int s2n_crypto_disable_init(void); /** * Prevents S2N from installing an atexit handler, which allows safe shutdown of S2N from within a * re-entrant shared library + * + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_disable_atexit(void); +/** + * Fetches the OpenSSL version s2n-tls was compiled with. This can be used by applications to validate at runtime + * that the versions of s2n-tls and Openssl that they have loaded are correct. + * + * @returns the version number of OpenSSL that s2n-tls was compiled with + */ S2N_API extern unsigned long s2n_get_openssl_version(void); + +/** + * Initializes the s2n-tls library and should be called once in your application, before any other s2n-tls + * functions are called. Failure to call s2n_init() will result in errors from other s2n-tls functions. + * + * @warning This function is not thread safe and should only be called once. + * + * @returns S2N_SUCCESS on success.
S2N_FAILURE on failure + */ S2N_API extern int s2n_init(void); + +/** + * Cleans up any internal resources used by s2n-tls. This function should be called from each thread or process + * that is created subsequent to calling `s2n_init` when that thread or process is done calling other s2n-tls functions. + * + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_cleanup(void); + +/** + * Create a new s2n_config object. This object can (and should) be associated with many connection objects. + * + * @returns A new configuration object suitable for associating certs and keys. + */ S2N_API extern struct s2n_config *s2n_config_new(void); + +/** + * Frees the memory associated with an `s2n_config` object. + * + * @param config The configuration object being freed + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_free(struct s2n_config *config); + +/** + * Frees the DH params associated with an `s2n_config` object. + * + * @param config The configuration object with DH params being freed + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_free_dhparams(struct s2n_config *config); + +/** + * Frees the certificate chain and key associated with an `s2n_config` object. + * + * @param config The configuration object with the certificate chain and key being freed + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_free_cert_chain_and_key(struct s2n_config *config); +/** + * Callback function type used to get the system time. + * + * Takes two arguments: a pointer to arbitrary data for use within + * the callback and a pointer to a 64 bit int. The 64 bit pointer should be set to the + * number of nanoseconds since the Unix epoch. + * + * The function should return 0 on success and -1 on failure. + */ typedef int (*s2n_clock_time_nanoseconds) (void *, uint64_t *); + +/** + * Cache callback function that allows the caller to retrieve SSL session data + * from a cache. + * + * The callback function takes six arguments: + * a pointer to the s2n_connection object, + * a pointer to arbitrary data for use within the callback, + * a pointer to a key which can be used to retrieve the cached entry, + * a 64 bit unsigned integer specifying the size of this key, + * a pointer to a memory location where the value should be stored, + * and a pointer to a 64 bit unsigned integer specifying the size of this value. + * + * Initially *value_size will be set to the amount of space allocated for the value, + * the callback should set *value_size to the actual size of the data returned. + * If there is insufficient space, -1 should be returned. + * If the cache is not ready to provide data for the request, + * S2N_CALLBACK_BLOCKED should be returned. + * + * This will cause s2n_negotiate() to return S2N_BLOCKED_ON_APPLICATION_INPUT. + */ typedef int (*s2n_cache_retrieve_callback) (struct s2n_connection *conn, void *, const void *key, uint64_t key_size, void *value, uint64_t *value_size); + +/** + * Cache callback function that allows the caller to store SSL session data in a + * cache.
+ * + * The callback function takes seven arguments: + * a pointer to the s2n_connection object, + * a pointer to arbitrary data for use within the callback, + * a 64-bit unsigned integer specifying the number of seconds the session data may be stored for, + * a pointer to a key which can be used to retrieve the cached entry, + * a 64 bit unsigned integer specifying the size of this key, + * a pointer to a value which should be stored, + * and a 64 bit unsigned integer specifying the size of this value. + */ typedef int (*s2n_cache_store_callback) (struct s2n_connection *conn, void *, uint64_t ttl_in_seconds, const void *key, uint64_t key_size, const void *value, uint64_t value_size); + +/** +* Cache callback function that allows the caller to delete SSL session data +* from a cache. +* +* The callback function takes four arguments: +* a pointer to the s2n_connection object, +* a pointer to arbitrary data for use within the callback, +* a pointer to a key which can be used to delete the cached entry, +* and a 64 bit unsigned integer specifying the size of this key. +*/ typedef int (*s2n_cache_delete_callback) (struct s2n_connection *conn, void *, const void *key, uint64_t key_size); +/** + * Allows the caller to set a callback function that will be used to get the + * system time. + * @param config The configuration object being updated + * @param clock_fn The wall clock time callback function + * @param ctx An opaque pointer that the callback will be invoked with + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_wall_clock(struct s2n_config *config, s2n_clock_time_nanoseconds clock_fn, void *ctx); + +/** + * Allows the caller to set a callback function that will be used to get + * monotonic time. + * @param config The configuration object being updated + * @param clock_fn The monotonic time callback function + * @param ctx An opaque pointer that the callback will be invoked with + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_monotonic_clock(struct s2n_config *config, s2n_clock_time_nanoseconds clock_fn, void *ctx); +/** + * Translates an s2n_error code to a human readable string explaining the error. + * + * @param error The error code to explain. Usually this is s2n_errno + * @param lang The language to explain the error code. Pass "EN" or NULL for English. + * @returns The error string + */ S2N_API extern const char *s2n_strerror(int error, const char *lang); + +/** + * Translates an s2n_error code to a human readable string containing internal debug + * information, including file name and line number. This function is useful when + * reporting issues to the s2n-tls development team. + * + * @param error The error code to explain. Usually this is s2n_errno + * @param lang The language to explain the error code. Pass "EN" or NULL for English. + * @returns The error string + */ S2N_API extern const char *s2n_strerror_debug(int error, const char *lang); + +/** + * Translates an s2n_error code to a human readable string. + * + * @param error The error code to explain. Usually this is s2n_errno + * @returns The error string + */ S2N_API extern const char *s2n_strerror_name(int error); +/** + * Opaque stack trace structure. + */ struct s2n_stacktrace; + +/** + * Checks if s2n stack trace captures are enabled. + * + * @returns True if stack traces are enabled. False if they are disabled.
+ */ S2N_API extern bool s2n_stack_traces_enabled(void); + +/** + * Configures the s2n stack trace captures option. + * + * @param newval Boolean to determine if stack traces should be enabled. True to enable them. False to disable them. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_stack_traces_enabled_set(bool newval); + +/** + * Calculates the s2n stack trace. + * + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_calculate_stacktrace(void); + +/** + * Prints the s2n stack trace to a file. The file descriptor is expected to be + * open and ready for writing. + * + * @param fptr A pointer to the file s2n-tls should write the stack trace to. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_print_stacktrace(FILE *fptr); + +/** + * Clean up the memory used to contain the stack trace. + * + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_free_stacktrace(void); + +/** + * Export the s2n_stacktrace. + * + * @param trace A pointer to the s2n_stacktrace to fill. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_get_stacktrace(struct s2n_stacktrace *trace); +/** + * Allows the caller to set a callback function that will be used to store SSL + * session data in a cache. + * + * @param config The configuration object being updated + * @param cache_store_callback The cache store callback function. + * @param data An opaque context pointer that the callback will be invoked with. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_cache_store_callback(struct s2n_config *config, s2n_cache_store_callback cache_store_callback, void *data); + +/** + * Allows the caller to set a callback function that will be used to retrieve SSL + * session data from a cache. + * + * @param config The configuration object being updated + * @param cache_retrieve_callback The cache retrieve callback function. + * @param data An opaque context pointer that the callback will be invoked with. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_cache_retrieve_callback(struct s2n_config *config, s2n_cache_retrieve_callback cache_retrieve_callback, void *data); + +/** + * Allows the caller to set a callback function that will be used to delete SSL + * session data from a cache. + * + * @param config The configuration object being updated + * @param cache_delete_callback The cache delete callback function. + * @param data An opaque context pointer that the callback will be invoked with. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_cache_delete_callback(struct s2n_config *config, s2n_cache_delete_callback cache_delete_callback, void *data); +/** + * A function that will be called when s2n-tls is initialized. + */ typedef int (*s2n_mem_init_callback)(void); + +/** + * Will be called when `s2n_cleanup` is executed. + */ typedef int (*s2n_mem_cleanup_callback)(void); + +/** + * A function that can allocate at least `requested` bytes of memory and + * store the location of that memory in **\*ptr**, and the size of the allocated + * data in **\*allocated**. The function may choose to allocate more memory + * than was requested. s2n-tls will consider all allocated memory available for + * use, and will attempt to free all allocated memory when able. 
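As a rough illustration of the allocator contract described above, a pass-through allocator over malloc/free could look like the following (a hypothetical sketch; it assumes the callbacks use the same 0-on-success convention as the rest of this header, and it must be installed with s2n_mem_set_callbacks() before s2n_init()):

    #include <stdint.h>
    #include <stdlib.h>

    /* Allocate exactly the requested size and report it back via *allocated. */
    static int app_mem_malloc(void **ptr, uint32_t requested, uint32_t *allocated)
    {
        *ptr = malloc(requested);
        if (*ptr == NULL) {
            return -1;
        }
        *allocated = requested;
        return 0;
    }

    static int app_mem_free(void *ptr, uint32_t size)
    {
        (void) size; /* libc free() does not need the size */
        free(ptr);
        return 0;
    }

    static int app_mem_noop(void) { return 0; } /* init / cleanup hooks */

    /* Before s2n_init():
     *   s2n_mem_set_callbacks(app_mem_noop, app_mem_noop, app_mem_malloc, app_mem_free);
     */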
+ */ typedef int (*s2n_mem_malloc_callback)(void **ptr, uint32_t requested, uint32_t *allocated); + +/** + * A function that can free memory. + */ typedef int (*s2n_mem_free_callback)(void *ptr, uint32_t size); +/** + * Allows the caller to over-ride s2n-tls's internal memory handling functions. + * + * @warning This function must be called before s2n_init(). + * + * @param mem_init_callback The s2n_mem_init_callback + * @param mem_cleanup_callback The s2n_mem_cleanup_callback + * @param mem_malloc_callback The s2n_mem_malloc_callback + * @param mem_free_callback The s2n_mem_free_callback + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_mem_set_callbacks(s2n_mem_init_callback mem_init_callback, s2n_mem_cleanup_callback mem_cleanup_callback, s2n_mem_malloc_callback mem_malloc_callback, s2n_mem_free_callback mem_free_callback); +/** + * A callback function that will be called when s2n-tls is initialized. + */ typedef int (*s2n_rand_init_callback)(void); + +/** + * A callback function that will be called when `s2n_cleanup` is executed. + */ typedef int (*s2n_rand_cleanup_callback)(void); + +/** + * A callback function that will be used to provide entropy to the s2n-tls + * random number generators. + */ typedef int (*s2n_rand_seed_callback)(void *data, uint32_t size); + +/** + * A callback function that will be used to mix in entropy every time the RNG + * is invoked. + */ typedef int (*s2n_rand_mix_callback)(void *data, uint32_t size); +/** + * Allows the caller to over-ride s2n-tls's entropy functions. + * + * @warning This function must be called before s2n_init(). + * + * @param rand_init_callback The s2n_rand_init_callback + * @param rand_cleanup_callback The s2n_rand_cleanup_callback + * @param rand_seed_callback The s2n_rand_seed_callback + * @param rand_mix_callback The s2n_rand_mix_callback + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_rand_set_callbacks(s2n_rand_init_callback rand_init_callback, s2n_rand_cleanup_callback rand_cleanup_callback, s2n_rand_seed_callback rand_seed_callback, s2n_rand_mix_callback rand_mix_callback); +/** + * TLS extensions supported by s2n-tls + */ typedef enum { S2N_EXTENSION_SERVER_NAME = 0, S2N_EXTENSION_MAX_FRAG_LEN = 1, @@ -175,6 +551,9 @@ typedef enum { S2N_EXTENSION_RENEGOTIATION_INFO = 65281, } s2n_tls_extension_type; +/** + * MFL configurations from https://datatracker.ietf.org/doc/html/rfc6066#section-4. + */ typedef enum { S2N_TLS_MAX_FRAG_LEN_512 = 1, S2N_TLS_MAX_FRAG_LEN_1024 = 2, @@ -182,44 +561,237 @@ typedef enum { S2N_TLS_MAX_FRAG_LEN_4096 = 4, } s2n_max_frag_len; +/** + * Opaque certificate type. + */ struct s2n_cert; + +/** + * Opaque certificate chain and key type. + */ struct s2n_cert_chain_and_key; + +/** + * Opaque key type. + */ struct s2n_pkey; + +/** + * Opaque public key type. + */ typedef struct s2n_pkey s2n_cert_public_key; + +/** + * Opaque private key type. + */ typedef struct s2n_pkey s2n_cert_private_key; +/** + * Creates a new s2n_cert_chain_and_key object. This object can be associated + * with many config objects. It is used to represent a certificate and key pair. + * + * @returns A new object used to represent a certificate-chain/key pair + */ S2N_API extern struct s2n_cert_chain_and_key *s2n_cert_chain_and_key_new(void); + +/** + * Associates a certificate chain and private key with an `s2n_cert_chain_and_key` object. 
+ * + * `cert_chain_pem` should be a PEM encoded certificate chain, with the first + * certificate in the chain being your leaf certificate. `private_key_pem` + * should be a PEM encoded private key corresponding to the leaf certificate. + * + * @note Prefer using s2n_cert_chain_and_key_load_pem_bytes. + * + * @param chain_and_key The certificate chain and private key handle + * @param chain_pem A byte array of a PEM encoded certificate chain. + * @param private_key_pem A byte array of a PEM encoded key. + * + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_cert_chain_and_key_load_pem(struct s2n_cert_chain_and_key *chain_and_key, const char *chain_pem, const char *private_key_pem); + +/** + * Associates a certificate chain and private key with an `s2n_cert_chain_and_key` object. + * + * `cert_chain_pem` should be a PEM encoded certificate chain, with the first + * certificate in the chain being your leaf certificate. `private_key_pem` + * should be a PEM encoded private key corresponding to the leaf certificate. + * + * @param chain_and_key The certificate chain and private key handle + * @param chain_pem A byte array of a PEM encoded certificate chain. + * @param chain_pem_len Size of `chain_pem` + * @param private_key_pem A byte array of a PEM encoded key. + * @param private_key_pem_len Size of `private_key_pem` + * + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_cert_chain_and_key_load_pem_bytes(struct s2n_cert_chain_and_key *chain_and_key, uint8_t *chain_pem, uint32_t chain_pem_len, uint8_t *private_key_pem, uint32_t private_key_pem_len); + +/** + * Associates a public certificate chain with a `s2n_cert_chain_and_key` object. It does + * NOT set a private key, so the connection will need to be configured to + * [offload private key operations](https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md#offloading-asynchronous-private-key-operations). + * + * @param chain_and_key The certificate chain and private key handle + * @param chain_pem A byte array of a PEM encoded certificate chain. + * @param chain_pem_len Size of `chain_pem` + * + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_cert_chain_and_key_load_public_pem_bytes(struct s2n_cert_chain_and_key *chain_and_key, uint8_t *chain_pem, uint32_t chain_pem_len); + +/** + * Frees the memory associated with an `s2n_cert_chain_and_key` object. + * + * @param cert_and_key The certificate chain and private key handle + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_cert_chain_and_key_free(struct s2n_cert_chain_and_key *cert_and_key); + +/** + * Adds a context to the `s2n_cert_chain_and_key` object. + * + * @param cert_and_key The certificate chain and private key handle + * @param ctx An opaque pointer to user supplied data. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_cert_chain_and_key_set_ctx(struct s2n_cert_chain_and_key *cert_and_key, void *ctx); + +/** + * Get the user supplied context from the `s2n_cert_chain_and_key` object. + * + * @param cert_and_key The certificate chain and private key handle + * @returns The user supplied pointer from s2n_cert_chain_and_key_set_ctx() + */ S2N_API extern void *s2n_cert_chain_and_key_get_ctx(struct s2n_cert_chain_and_key *cert_and_key); + +/** + * Get the private key from the `s2n_cert_chain_and_key` object. 
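Taken together, the chain-and-key APIs above are typically used like this on the server side (a hypothetical sketch; `load_chain` is an illustrative helper, and the PEM buffers are placeholders the application would read from disk or a secret store):

    #include <stddef.h>
    #include <s2n.h>

    /* Load a PEM certificate chain plus leaf private key into a reusable
     * chain object. Returns NULL on failure, matching the pointer-return
     * convention documented earlier in this header. */
    static struct s2n_cert_chain_and_key *load_chain(const char *chain_pem,
                                                     const char *key_pem)
    {
        struct s2n_cert_chain_and_key *chain = s2n_cert_chain_and_key_new();
        if (chain == NULL) {
            return NULL;
        }
        if (s2n_cert_chain_and_key_load_pem(chain, chain_pem, key_pem) != S2N_SUCCESS) {
            s2n_cert_chain_and_key_free(chain);
            return NULL;
        }
        /* Free with s2n_cert_chain_and_key_free() only once no config uses it. */
        return chain;
    }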
+ * + * @param cert_and_key The certificate chain and private key handle + * @returns A pointer to the `s2n_cert_private_key` + */ S2N_API extern s2n_cert_private_key *s2n_cert_chain_and_key_get_private_key(struct s2n_cert_chain_and_key *cert_and_key); +/** + * A callback function that is invoked if s2n-tls cannot resolve a conflict between + * two certificates with the same domain name. This function is invoked while certificates + * are added to an `s2n_config`. + * + * Currently, the only builtin resolution for domain name conflicts is certificate type (RSA, + * ECDSA, etc.). The callback should return a pointer to the `s2n_cert_chain_and_key` that + * should be used for DNS name `name`. + * + * If NULL is returned, the first certificate will be used. Typically an application + * will use properties like trust and expiry to implement tiebreaking. + */ typedef struct s2n_cert_chain_and_key* (*s2n_cert_tiebreak_callback) (struct s2n_cert_chain_and_key *cert1, struct s2n_cert_chain_and_key *cert2, uint8_t *name, uint32_t name_len); + +/** + * Sets the `s2n_cert_tiebreak_callback` for resolving domain name conflicts. + * If no callback is set, the first certificate added for a domain name will always be preferred. + * + * @param config The configuration object being updated + * @param cert_tiebreak_cb The pointer to the certificate tiebreak function + * + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_cert_tiebreak_callback(struct s2n_config *config, s2n_cert_tiebreak_callback cert_tiebreak_cb); +/** + * Associates a certificate chain and a private key with an `s2n_config` object. + * At present, only one certificate-chain/key pair may be associated with a config. + * `cert_chain_pem` should be a PEM encoded certificate chain, with the first certificate + * in the chain being your server's certificate. `private_key_pem` should be a + * PEM encoded private key corresponding to the server certificate. + * + * @param config The configuration object being updated + * @param cert_chain_pem A byte array of a PEM encoded certificate chain. + * @param private_key_pem A byte array of a PEM encoded key. + * @returns S2N_SUCCESS on success.
S2N_FAILURE on failure + */ S2N_API extern int s2n_config_add_cert_chain_and_key_to_store(struct s2n_config *config, struct s2n_cert_chain_and_key *cert_key_pair); + +/** + * Explicitly sets certificate chain and private key pairs to be used as defaults for each auth + * method (key type). A "default" certificate is used when there is not an SNI match with any other + * configured certificate. + * + * Only one certificate can be set as the default per auth method (one RSA default, one ECDSA default, + * etc.). All previous default certificates will be cleared and re-set when this API is called. + * + * This API is called for a specific `s2n_config` object. s2n-tls will attempt to automatically choose + * default certificates for each auth method (key type) based on the order that `s2n_cert_chain_and_key` + * are added to the `s2n_config` using one of the APIs listed above. + * `s2n_config_set_cert_chain_and_key_defaults` can be called at any time; s2n-tls will clear defaults + * and no longer attempt to automatically choose any default certificates. + * + * @param config The configuration object being updated + * @param cert_key_pairs An array of certificate chain and private key handles + * @param num_cert_key_pairs The amount of handles in cert_key_pairs + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_cert_chain_and_key_defaults(struct s2n_config *config, struct s2n_cert_chain_and_key **cert_key_pairs, uint32_t num_cert_key_pairs); +/** + * Adds to the trust store from a CA file or directory containing trusted certificates. + * To completely override those locations, call s2n_config_wipe_trust_store() before calling + * this function. + * + * @note The trust store will be initialized with the common locations for the host + * operating system by default. + * @param config The configuration object being updated + * @param ca_pem_filename A string for the file path of the CA PEM file. + * @param ca_dir A string for the directory of the CA PEM files. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_verification_ca_location(struct s2n_config *config, const char *ca_pem_filename, const char *ca_dir); + +/** + * Adds a PEM to the trust store. This will allocate memory, and load PEM into the + * Trust Store. Note that the trust store will be initialized with the common locations + * for the host operating system by default. To completely override those locations, + * call s2n_config_wipe_trust_store before calling this function. + * + * @param config The configuration object being updated + * @param pem The string value of the PEM certificate. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_add_pem_to_trust_store(struct s2n_config *config, const char *pem); @@ -233,25 +805,100 @@ extern int s2n_config_add_pem_to_trust_store(struct s2n_config *config, const ch * * @param config The configuration object being updated * - * @return 0 on success and -1 on error + * @returns 0 on success and -1 on error */ S2N_API extern int s2n_config_wipe_trust_store(struct s2n_config *config); +/** + * A callback function invoked (usually multiple times) during X.509 validation for each + * name encountered in the leaf certificate. + * + * Return 1 to trust that hostname or 0 to not trust the hostname. + * + * If this function returns 1, then the certificate is considered trusted and that portion + * of the X.509 validation will succeed. 
+ * + * If no hostname results in a 1 being returned, the certificate will be untrusted and the + * validation will terminate immediately. The default behavior is to reject all host names + * found in a certificate if client mode or client authentication is being used. + * + * Data is an opaque user context set in s2n_config_set_verify_host_callback(). + */ typedef uint8_t (*s2n_verify_host_fn) (const char *host_name, size_t host_name_len, void *data); -/* will be inherited by s2n_connection. If s2n_connection specifies a callback, that callback will be used for that connection. */ + +/** + * Sets the callback to use for verifying that a hostname from an X.509 certificate is trusted. + * By default, no certificate will be trusted. To override this behavior, set this callback. + * + * This change will be inherited by s2n_connections using this config. If s2n_connection specifies + * a callback, that callback will be used for that connection. + * + * If a separate callback for different connections using the same config is desired, + * see s2n_connection_set_verify_host_callback(). + * + * @param config The configuration object being updated + * @param data A user supplied opaque context to pass back to the callback + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_verify_host_callback(struct s2n_config *config, s2n_verify_host_fn, void *data); +/** + * Toggles whether or not to validate stapled OCSP responses. + * + * 1 means OCSP responses will be validated when they are encountered, while 0 means this step will + * be skipped. + * + * The default value is 1 if the underlying libCrypto implementation supports OCSP. + * + * @param config The configuration object being updated + * @param check_ocsp The desired OCSP response check configuration + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_check_stapled_ocsp_response(struct s2n_config *config, uint8_t check_ocsp); + +/** + * Turns off all X.509 validation during the negotiation phase of the connection. This should only + * be used for testing or debugging purposes. + * + * @param config The configuration object being updated + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_disable_x509_verification(struct s2n_config *config); + +/** + * Sets the maximum allowed depth of a cert chain used for X509 validation. The default value is + * 7. If this limit is exceeded, validation will fail if s2n_config_disable_x509_verification() + * has not been called. 0 is an illegal value and will return an error. + * 1 means only a root certificate will be used. + * + * @param config The configuration object being updated + * @param max_depth The number of allowed certificates in the certificate chain + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_max_cert_chain_depth(struct s2n_config *config, uint16_t max_depth); +/** + * Associates a set of Diffie-Hellman parameters with an `s2n_config` object. + * @note `dhparams_pem` should be PEM encoded DH parameters. + * + * @param config The configuration object being updated + * @param dhparams_pem A string containing the PEM encoded DH parameters. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_add_dhparams(struct s2n_config *config, const char *dhparams_pem); + +/** + * Sets the security policy that includes the cipher/kem/signature/ecc preferences and + * protocol version.
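A concrete shape for the host-name hook described a little earlier (a hypothetical sketch; a production callback would usually check a set of expected names instead of the single string passed through the opaque data pointer, and strncasecmp is POSIX):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include <strings.h>

    /* Trust exactly one expected host name, carried in the opaque data pointer. */
    static uint8_t verify_host(const char *host_name, size_t host_name_len, void *data)
    {
        const char *expected = data;
        if (strlen(expected) != host_name_len) {
            return 0;
        }
        return strncasecmp(expected, host_name, host_name_len) == 0 ? 1 : 0;
    }

    /* s2n_config_set_verify_host_callback(config, verify_host, (void *) "example.com"); */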
+ * + * See the [USAGE-GUIDE.md](https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md) for how to use security policies. + */ S2N_API extern int s2n_config_set_cipher_preferences(struct s2n_config *config, const char *version); @@ -267,157 +914,706 @@ extern int s2n_config_set_cipher_preferences(struct s2n_config *config, const ch S2N_API extern int s2n_config_append_protocol_preference(struct s2n_config *config, const uint8_t *protocol, uint8_t protocol_len); +/** + * Sets the application protocol preferences on an `s2n_config` object. + * `protocols` is a list in order of preference, with most preferred protocol first, and of + * length `protocol_count`. + * + * When acting as an `S2N_CLIENT` the protocol list is included in the Client Hello message + * as the ALPN extension. + * + * As an `S2N_SERVER`, the list is used to negotiate a mutual application protocol with the + * client. After the negotiation for the connection has completed, the agreed upon protocol + * can be retrieved with s2n_get_application_protocol() + * + * @param config The configuration object being updated + * @param protocols The list of preferred protocols, in order of preference + * @param protocol_count The size of the protocols list + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_protocol_preferences(struct s2n_config *config, const char * const *protocols, int protocol_count); + +/** + * Enum used to define the type, if any, of certificate status request + * an S2N_CLIENT should make during the handshake. The only supported status request type is + * OCSP, `S2N_STATUS_REQUEST_OCSP`. +*/ typedef enum { S2N_STATUS_REQUEST_NONE = 0, S2N_STATUS_REQUEST_OCSP = 1 } s2n_status_request_type; + +/** + * Sets up an S2N_CLIENT to request the server certificate status during an SSL handshake. If set + * to S2N_STATUS_REQUEST_NONE, no status request is made. + * + * @param config The configuration object being updated + * @param type The desired request status type + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_status_request_type(struct s2n_config *config, s2n_status_request_type type); + +/** + * Enum to set Certificate Transparency Support level. + */ typedef enum { S2N_CT_SUPPORT_NONE = 0, S2N_CT_SUPPORT_REQUEST = 1 } s2n_ct_support_level; + +/** + * Set the Certificate Transparency Support level. + * + * @param config The configuration object being updated + * @param level The desired Certificate Transparency Support configuration + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_ct_support_level(struct s2n_config *config, s2n_ct_support_level level); + +/** + * Sets whether or not a connection should terminate on receiving a WARNING alert from its peer. + * + * `alert_behavior` can take the following values: + * - `S2N_ALERT_FAIL_ON_WARNINGS` default behavior: s2n-tls will terminate the connection if its peer sends a WARNING alert. + * - `S2N_ALERT_IGNORE_WARNINGS` - with the exception of `close_notify` s2n-tls will ignore all WARNING alerts and keep communicating with its peer. This setting is ignored in TLS1.3 + * + * @note TLS1.3 terminates a connection for all alerts except user_canceled. + */ typedef enum { S2N_ALERT_FAIL_ON_WARNINGS = 0, S2N_ALERT_IGNORE_WARNINGS = 1 } s2n_alert_behavior; + +/** + * Sets the config's alert behavior based on the `s2n_alert_behavior` enum. 
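For the ALPN negotiation documented just above, registering a preference list is short (a hypothetical sketch; `set_alpn` is an illustrative helper, and "h2" / "http/1.1" are the usual IANA ALPN identifiers):

    #include <s2n.h>

    /* Register ALPN preferences, most preferred protocol first. */
    static int set_alpn(struct s2n_config *config)
    {
        static const char *const protocols[] = { "h2", "http/1.1" };
        return s2n_config_set_protocol_preferences(config, protocols, 2);
    }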
+ * + * @param config The configuration object being updated + * @param alert_behavior The desired alert behavior. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_alert_behavior(struct s2n_config *config, s2n_alert_behavior alert_behavior); + +/** + * Sets the extension data in the `s2n_config` object for the specified extension. + * This method will clear any existing data that is set. If the data and length + * parameters are set to NULL, no new data is set in the `s2n_config` object, + * effectively clearing existing data. + * + * @param config The configuration object being updated + * @param type The extension type + * @param data Data for the extension + * @param length Length of the `data` buffer + */ S2N_API extern int s2n_config_set_extension_data(struct s2n_config *config, s2n_tls_extension_type type, const uint8_t *data, uint32_t length); + +/** + * Allows the caller to set a TLS Maximum Fragment Length extension that will be used + * to fragment outgoing messages. s2n-tls currently does not reject fragments larger + * than the configured maximum when in server mode. The TLS negotiated maximum fragment + * length overrides the preference set by the `s2n_connection_prefer_throughput` and + * `s2n_connection_prefer_low_latency`. + * + * @param config The configuration object being updated + * @param mfl_code The selected MFL size + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_send_max_fragment_length(struct s2n_config *config, s2n_max_frag_len mfl_code); + +/** + * Allows the server to opt-in to accept client's TLS maximum fragment length extension + * requests. If this API is not called, and the client requests the extension, the server will ignore + * the request and continue the TLS handshake with the default maximum fragment length of 8k bytes + * + * @param config The configuration object being updated + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_accept_max_fragment_length(struct s2n_config *config); +/** + * Sets the lifetime of the cached session state. The default value is 15 hours. + * + * @param config The configuration object being updated + * @param lifetime_in_secs The desired lifetime of the session state in seconds + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_session_state_lifetime(struct s2n_config *config, uint64_t lifetime_in_secs); +/** + * Enable or disable session resumption using session tickets. + * + * @param config The configuration object being updated + * @param enabled Set to 1 to enable. Set to 0 to disable. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_session_tickets_onoff(struct s2n_config *config, uint8_t enabled); + +/** + * Enable or disable session caching. + * + * @param config The configuration object being updated + * @param enabled Set to 1 to enable. Set to 0 to disable. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_session_cache_onoff(struct s2n_config *config, uint8_t enabled); + +/** + * Sets how long a session ticket key will be in a state where it can be used for both encryption + * and decryption of tickets on the server side. + * + * @note The default value is 2 hours.
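Putting the session-ticket settings together, server-side resumption setup might look like the following (a hypothetical sketch; `enable_ticket_resumption`, the key name, and the 32 zero bytes are placeholders, and s2n_config_add_ticket_crypto_key is documented just below):

    #include <stdint.h>
    #include <s2n.h>

    /* Enable session tickets and install one ticket encryption key. */
    static int enable_ticket_resumption(struct s2n_config *config)
    {
        static uint8_t key_name[] = "2022-05-ticket-key"; /* placeholder name */
        static uint8_t key[32] = { 0 }; /* placeholder; use a CSPRNG or KMS in practice */

        if (s2n_config_set_session_tickets_onoff(config, 1) != S2N_SUCCESS) {
            return -1;
        }
        /* An intro time of 0 means "now", per the parameter description below. */
        return s2n_config_add_ticket_crypto_key(config, key_name, sizeof(key_name) - 1,
                                                key, sizeof(key), 0);
    }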
+ +/** + * Sets how long a session ticket key will be in a state where it can be used just for decryption of + * already assigned tickets on the server side. Once decrypted, the session will resume and the + * server will issue a new session ticket encrypted using a key in encrypt-decrypt state. + * + * @note The default value is 13 hours. + * @param config The configuration object being updated + * @param lifetime_in_secs The desired lifetime in seconds for decrypting already issued tickets + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_ticket_decrypt_key_lifetime(struct s2n_config *config, uint64_t lifetime_in_secs); + + +/** + * Adds a session ticket key on the server side. It would be ideal to add new keys after every + * (encrypt_decrypt_key_lifetime_in_nanos/2) nanos because this will allow for gradual and + * linear transition of a key from encrypt-decrypt state to decrypt-only state. + * + * @param config The configuration object being updated + * @param name Name of the session ticket key that should be randomly generated to avoid collisions + * @param name_len Length of session ticket key name + * @param key Key used to perform encryption/decryption of session ticket + * @param key_len Length of the session ticket key + * @param intro_time_in_seconds_from_epoch Time at which the session ticket key is introduced. If this is 0, then intro_time_in_seconds_from_epoch is set to now. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_add_ticket_crypto_key(struct s2n_config *config, const uint8_t *name, uint32_t name_len, uint8_t *key, uint32_t key_len, uint64_t intro_time_in_seconds_from_epoch); - +/** + * Sets user defined context on the `s2n_config` object. + * + * @param config The configuration object being updated + * @param ctx A pointer to the user defined ctx. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_ctx(struct s2n_config *config, void *ctx); + +/** + * Gets the user defined context from the `s2n_config` object. + * The context is set by calling s2n_config_set_ctx() + * + * @param config The configuration object being accessed + * @param ctx A pointer to the user defined ctx. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_get_ctx(struct s2n_config *config, void **ctx); +/** + * Used to declare connections as server or client type, respectively. + */ typedef enum { S2N_SERVER, S2N_CLIENT } s2n_mode; + +/** + * Creates a new connection object. Each s2n-tls SSL/TLS connection uses + * one of these objects. These connection objects can be operated on by up + * to two threads at a time, one sender and one receiver, but neither sending + * nor receiving are atomic, so if these objects are being called by multiple + * sender or receiver threads, you must perform your own locking to ensure + * that only one sender or receiver is active at a time. + * + * The `mode` parameter specifies whether the caller is a server or a client.
+ * Connection objects are re-usable across many connections, and should be + * re-used (to avoid deallocating and allocating memory). You should wipe + * connections immediately after use. + * + * @param mode The desired connection type + * @returns An s2n_connection handle + */ S2N_API extern struct s2n_connection *s2n_connection_new(s2n_mode mode); + +/** + * Associates a configuration object with a connection. + * + * @param conn The connection object being associated + * @param config The configuration object being associated + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_config(struct s2n_connection *conn, struct s2n_config *config); +/** + * Sets user defined context on an `s2n_connection` object. + * + * @param conn The connection object being updated + * @param ctx A pointer to the user defined context + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_ctx(struct s2n_connection *conn, void *ctx); + +/** + * Gets user defined context from a `s2n_connection` object. + * + * @param conn The connection object that contains the desired context + */ S2N_API extern void *s2n_connection_get_ctx(struct s2n_connection *conn); +/** + * The callback function takes as input the s2n-tls connection that received the ClientHello + * and the context previously provided in `s2n_config_set_client_hello_cb`. The callback can + * access any ClientHello information from the connection and use the `s2n_connection_set_config` + * call to change the config of the connection. + */ typedef int s2n_client_hello_fn(struct s2n_connection *conn, void *ctx); + +/** + * Client Hello callback modes + * - `S2N_CLIENT_HELLO_CB_BLOCKING` (default): + * - In this mode s2n-tls expects the callback to complete its work and return the appropriate response code before the handshake continues. If any of the connection properties were changed based on the server_name extension the callback must either return a value greater than 0 or invoke `s2n_connection_server_name_extension_used`, otherwise the callback returns 0 to continue the handshake. + * - `S2N_CLIENT_HELLO_CB_NONBLOCKING`: + * - In non-blocking mode, s2n-tls expects the callback to not complete its work. If the callback returns a response code of 0 s2n-tls will return `S2N_FAILURE` with `S2N_ERR_T_BLOCKED` error type and `s2n_blocked_status` set to `S2N_BLOCKED_ON_APPLICATION_INPUT`. The handshake is paused and further calls to `s2n_negotiate` will continue to return the same error until `s2n_client_hello_cb_done` is invoked for the `s2n_connection` to resume the handshake. This allows applications to process the client_hello without blocking and then resume the handshake at a later time. If any of the connection properties were changed on the basis of the server_name extension then `s2n_connection_server_name_extension_used` must be invoked before marking the callback done. + */ typedef enum { S2N_CLIENT_HELLO_CB_BLOCKING, S2N_CLIENT_HELLO_CB_NONBLOCKING } s2n_client_hello_cb_mode; + +/** + * Allows the caller to set a callback function that will be called after the ClientHello is parsed. A sketch of a blocking-mode callback follows this declaration. + * + * @param config The configuration object being updated + * @param client_hello_callback The client hello callback function + * @param ctx A pointer to a user defined context that the Client Hello callback will be invoked with. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_client_hello_cb(struct s2n_config *config, s2n_client_hello_fn client_hello_callback, void *ctx);
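As an illustration of the blocking mode described above, the sketch below swaps the connection's config based on the SNI name. `lookup_config_for_name()` is a hypothetical application-side lookup; `s2n_get_server_name()` and `s2n_connection_server_name_extension_used()` are declared elsewhere in this header.

```c
#include <s2n.h>

/* Hypothetical application lookup from an SNI name to a preloaded config. */
extern struct s2n_config *lookup_config_for_name(const char *server_name);

static int client_hello_cb(struct s2n_connection *conn, void *ctx)
{
    (void) ctx;
    const char *server_name = s2n_get_server_name(conn);
    if (server_name == NULL) {
        return 0; /* no SNI sent: continue with the default config */
    }

    struct s2n_config *config = lookup_config_for_name(server_name);
    if (config == NULL || s2n_connection_set_config(conn, config) != S2N_SUCCESS) {
        return -1; /* a negative return fails the handshake */
    }

    /* Connection properties changed based on server_name, so this call is
     * required before returning 0 in blocking mode. */
    return s2n_connection_server_name_extension_used(conn);
}

/* Installed with: s2n_config_set_client_hello_cb(config, client_hello_cb, NULL); */
```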
+ +/** + * Sets the callback execution mode. + * + * See s2n_client_hello_cb_mode for each mode's behavior. + * + * @param config The configuration object being updated + * @param cb_mode The desired callback mode + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_client_hello_cb_mode(struct s2n_config *config, s2n_client_hello_cb_mode cb_mode); + +/** + * Marks the non-blocking callback as complete. Can be invoked from within the callback when + * operating in non-blocking mode to continue the handshake. + * + * @param conn The connection object being updated + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_client_hello_cb_done(struct s2n_connection *conn); + +/** + * Must be invoked if any of the connection properties were changed on the basis of the server_name + * extension. This must be invoked before marking the Client Hello callback done. + * + * @param conn The connection object being updated + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_server_name_extension_used(struct s2n_connection *conn); +/** + * Opaque client hello handle + */ struct s2n_client_hello; + +/** + * Get the Client Hello from an s2n_connection. + * + * The earliest point during the handshake when this structure is available for use is in the + * client_hello_callback (see s2n_config_set_client_hello_cb()). + * + * @param conn The connection object containing the client hello + * @returns A handle to the s2n_client_hello structure holding the client hello message sent by the client during the handshake. NULL is returned if a Client Hello has not yet been received and parsed. + */ S2N_API extern struct s2n_client_hello *s2n_connection_get_client_hello(struct s2n_connection *conn); + +/** + * Function to determine the size of the raw Client Hello buffer. + * + * Can be used to determine the necessary size of the `out` buffer for + * s2n_client_hello_get_raw_message() + * + * @param ch The Client Hello handle + * @returns The size of the ClientHello message received by the server + */ S2N_API extern ssize_t s2n_client_hello_get_raw_message_length(struct s2n_client_hello *ch); + +/** + * Copies up to `max_length` bytes of the ClientHello message into the `out` buffer. + * The ClientHello retrieved using this function will have the Random bytes + * zeroed out. For SSLv2 ClientHello messages, the raw message contains only + * the cipher_specs, session_id and members portions of the hello message + * (see [RFC5246](https://tools.ietf.org/html/rfc5246#appendix-E.2)). To access other + * members, you may use the s2n_connection_get_client_hello_version(), + * s2n_connection_get_client_protocol_version() and s2n_connection_get_session_id_length() + * accessor functions. + * + * @param ch The Client Hello handle + * @param out The destination buffer for the raw Client Hello + * @param max_length The size of out in bytes + * @returns The number of copied bytes + */ S2N_API extern ssize_t s2n_client_hello_get_raw_message(struct s2n_client_hello *ch, uint8_t *out, uint32_t max_length); + +/** + * Function to determine the size of the Client Hello cipher suites. + * This can be used to allocate the `out` buffer for s2n_client_hello_get_cipher_suites(). + * + * @param ch The Client Hello handle + * @returns The number of bytes the cipher_suites takes on the ClientHello message received by the server + */ S2N_API extern ssize_t s2n_client_hello_get_cipher_suites_length(struct s2n_client_hello *ch);
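A sketch of capturing the raw ClientHello with the length query above, sized and copied in two steps; error handling is collapsed for brevity, and the snippet assumes it runs somewhere the ClientHello has already been parsed (for example, inside the client hello callback).

```c
#include <stdint.h>
#include <stdlib.h>
#include <s2n.h>

/* Returns a heap buffer holding the raw ClientHello, or NULL on failure.
 * The caller owns and must free the returned buffer. */
static uint8_t *copy_raw_client_hello(struct s2n_connection *conn, ssize_t *out_len)
{
    struct s2n_client_hello *ch = s2n_connection_get_client_hello(conn);
    if (ch == NULL) {
        return NULL; /* ClientHello not yet received and parsed */
    }

    ssize_t len = s2n_client_hello_get_raw_message_length(ch);
    if (len <= 0) {
        return NULL;
    }

    uint8_t *buf = malloc((size_t) len);
    if (buf == NULL) {
        return NULL;
    }

    *out_len = s2n_client_hello_get_raw_message(ch, buf, (uint32_t) len);
    return buf;
}
```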
+ +/** + * Copies into the `out` buffer `max_length` bytes of the cipher_suites on the ClientHello. + * + * @param ch The Client Hello handle + * @param out The destination buffer for the raw Client Hello cipher suites + * @param max_length The size of out in bytes + * @returns The number of copied bytes + */ S2N_API extern ssize_t s2n_client_hello_get_cipher_suites(struct s2n_client_hello *ch, uint8_t *out, uint32_t max_length); + +/** + * Function to determine the size of the Client Hello extensions. + * This can be used to allocate the `out` buffer for s2n_client_hello_get_extensions(). + * + * @param ch The Client Hello handle + * @returns The number of bytes the extensions take in the ClientHello message received by the server + */ S2N_API extern ssize_t s2n_client_hello_get_extensions_length(struct s2n_client_hello *ch); + +/** + * Copies into the `out` buffer `max_length` bytes of the extensions in the ClientHello. + * + * @param ch The Client Hello handle + * @param out The destination buffer for the raw Client Hello extensions + * @param max_length The size of out in bytes + * @returns The number of copied bytes + */ S2N_API extern ssize_t s2n_client_hello_get_extensions(struct s2n_client_hello *ch, uint8_t *out, uint32_t max_length); + +/** + * Queries the size of a given extension in the ClientHello message received by the server. Use this function to allocate the `out` buffer for + * the other client hello extension functions. + * + * @param ch A pointer to the Client Hello + * @param extension_type Indicates the desired extension + * @returns The number of bytes the given extension type takes + */ S2N_API extern ssize_t s2n_client_hello_get_extension_length(struct s2n_client_hello *ch, s2n_tls_extension_type extension_type); + +/** + * Copies into the `out` buffer `max_length` bytes of a given extension type on the ClientHello + * + * `ch` is a pointer to the `s2n_client_hello` of the `s2n_connection` which can be obtained using s2n_connection_get_client_hello(). + * + * @param ch A pointer to the Client Hello + * @param extension_type Indicates the desired extension + * @param out A pointer to the buffer that s2n will write the extension data to. This buffer MUST be the size of `max_length` + * @param max_length The size of `out`. + * @returns The number of copied bytes + */ S2N_API extern ssize_t s2n_client_hello_get_extension_by_id(struct s2n_client_hello *ch, s2n_tls_extension_type extension_type, uint8_t *out, uint32_t max_length); + /** * Used to check if a particular extension exists in the client hello. * + * `ch` is a pointer to the `s2n_client_hello` of the `s2n_connection` which can be obtained using s2n_connection_get_client_hello(). + * * @param ch A pointer to the client hello object * @param extension_iana The iana value of the extension * @param exists A pointer that will be set to whether or not the extension exists */ S2N_API extern int s2n_client_hello_has_extension(struct s2n_client_hello *ch, uint16_t extension_iana, bool *exists); + +/** + * Get the ClientHello session id length in bytes + * + * `ch` is a pointer to the `s2n_client_hello` of the `s2n_connection` which can be obtained using s2n_connection_get_client_hello(). + * + * @param ch A pointer to the Client Hello + * @param out_length An out pointer.
s2n will set its value to the size of the session_id in bytes. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_client_hello_get_session_id_length(struct s2n_client_hello *ch, uint32_t *out_length); + +/** + * Copies up to `max_length` bytes of the ClientHello session_id into the `out` buffer and stores the number of copied bytes in `out_length`. + * + * Retrieves the session id as sent by the client in the ClientHello message. The session id on the `s2n_connection` may change later + * when the server sends the ServerHello; see `s2n_connection_get_session_id` for how to get the final session id used for future session resumption. + * + * Use s2n_client_hello_get_session_id_length() to get the ClientHello session id length in bytes. `ch` is a pointer to the `s2n_client_hello` + * of the `s2n_connection` which can be obtained using s2n_connection_get_client_hello(). + * + * @param ch A pointer to the Client Hello + * @param out A pointer to the buffer that s2n will write the client session id to. This buffer MUST be the size of `max_length` + * @param out_length An out pointer. s2n will set its value to the size of the session_id in bytes. + * @param max_length The size of `out`. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_client_hello_get_session_id(struct s2n_client_hello *ch, uint8_t *out, uint32_t *out_length, uint32_t max_length); +/** + * Sets the file descriptor for an s2n connection. + * + * @warning If the read end of the pipe is closed unexpectedly, writing to the pipe will raise a SIGPIPE signal. + * **s2n-tls does NOT handle SIGPIPE.** A SIGPIPE signal will cause the process to terminate unless it is handled + * or ignored by the application. + * @note This file-descriptor should be active and connected + * @param conn A pointer to the s2n connection + * @param fd The new file descriptor + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_fd(struct s2n_connection *conn, int fd); + +/** + * Sets the file descriptor for the read channel of an s2n connection. + * + * @warning If the read end of the pipe is closed unexpectedly, writing to the pipe will raise a SIGPIPE signal. + * **s2n-tls does NOT handle SIGPIPE.** A SIGPIPE signal will cause the process to terminate unless it is handled + * or ignored by the application. + * @note This file-descriptor should be active and connected + * @param conn A pointer to the s2n connection + * @param readfd The new read file descriptor + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_read_fd(struct s2n_connection *conn, int readfd); + +/** + * Sets the assigned file descriptor for the write channel of an s2n connection. + * + * @note This file-descriptor should be active and connected + * @param conn A pointer to the s2n connection + * @param writefd The new write file descriptor + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_write_fd(struct s2n_connection *conn, int writefd); /** - * Gets the assigned file descriptor for the read channel of an s2n connection. - * - * @param conn A pointer to the s2n connection - * @param readfd pointer to place the used file descriptor. + * Gets the assigned file descriptor for the read channel of an s2n connection. + * + * @param conn A pointer to the s2n connection + * @param readfd pointer to place the used file descriptor. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_get_read_fd(struct s2n_connection *conn, int *readfd);
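Wiring a connected socket into a connection with the fd setters above, as a short sketch; `accept()` and `close()` come from the usual POSIX headers, and the fd could equally be split per direction with the read/write variants.

```c
#include <sys/socket.h>
#include <unistd.h>
#include <s2n.h>

/* Sketch: attach an accepted TCP socket to a fresh server connection. */
static struct s2n_connection *wrap_socket(struct s2n_config *config, int listener)
{
    int fd = accept(listener, NULL, NULL);
    if (fd < 0) {
        return NULL;
    }

    struct s2n_connection *conn = s2n_connection_new(S2N_SERVER);
    if (conn == NULL) {
        close(fd);
        return NULL;
    }
    if (s2n_connection_set_config(conn, config) != S2N_SUCCESS
        || s2n_connection_set_fd(conn, fd) != S2N_SUCCESS) {
        s2n_connection_free(conn);
        close(fd);
        return NULL;
    }
    return conn;
}
```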
 /** - * Gets the assigned file descriptor for the write channel of an s2n connection. - * - * @param conn A pointer to the s2n connection - * @param writefd pointer to place the used file descriptor. + * Gets the assigned file descriptor for the write channel of an s2n connection. + * + * @param conn A pointer to the s2n connection + * @param writefd pointer to place the used file descriptor. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_get_write_fd(struct s2n_connection *conn, int *writefd); +/** + * Indicates to s2n that the connection is using corked IO. + * + * @warning This API should only be used when using managed send IO. + * + * @param conn The connection object being updated + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_use_corked_io(struct s2n_connection *conn); +/** + * Function pointer for a user provided recv callback. + */ typedef int s2n_recv_fn(void *io_context, uint8_t *buf, uint32_t len); + +/** + * Function pointer for a user provided send callback. + */ typedef int s2n_send_fn(void *io_context, const uint8_t *buf, uint32_t len); + +/** + * Set a context containing anything needed in the recv callback function (for example, + * a file descriptor). The callback will be invoked with this context, the buffer holding + * data to be received, and the length of the buffer. + * + * @note The `io_context` passed to the callbacks may be set separately using `s2n_connection_set_recv_ctx` and `s2n_connection_set_send_ctx`. + * + * @param conn The connection object being updated + * @param ctx A user provided context that the callback will be invoked with + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_recv_ctx(struct s2n_connection *conn, void *ctx); + +/** + * Set a context containing anything needed in the send callback function (for example, + * a file descriptor). The callback will be invoked with this context, the buffer holding + * data to be sent, and the length of the buffer. + * + * @note The `io_context` passed to the callbacks may be set separately using `s2n_connection_set_recv_ctx` and `s2n_connection_set_send_ctx`. + * + * @param conn The connection object being updated + * @param ctx A user provided context that the callback will be invoked with + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_send_ctx(struct s2n_connection *conn, void *ctx); + +/** + * Configure a connection to use a recv callback to receive data. + * + * @note This callback may be blocking or nonblocking. + * @note The callback may receive less than the requested length. The function should return the number + * of bytes received, or set errno and return an error code < 0. + * + * @param conn The connection object being updated + * @param recv A recv callback function pointer + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_recv_cb(struct s2n_connection *conn, s2n_recv_fn recv); + +/** + * Configure a connection to use a send callback to send data. + * + * @note This callback may be blocking or nonblocking. + * @note The callback may send less than the requested length. The function should return the + * number of bytes sent or set errno and return an error code < 0. + * + * @param conn The connection object being updated + * @param send A send callback function pointer + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_send_cb(struct s2n_connection *conn, s2n_send_fn send);
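The custom IO hooks above can replace the managed fd path entirely. A sketch using a plain file descriptor carried in the context; per the notes above, the callbacks set errno and return a negative value on failure, and may transfer fewer bytes than requested.

```c
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <s2n.h>

/* The io_context here is simply a pointer to an int holding the fd. */
static int fd_recv(void *io_context, uint8_t *buf, uint32_t len)
{
    int fd = *(const int *) io_context;
    ssize_t n = read(fd, buf, len); /* read() sets errno on failure */
    return (int) n;                 /* short reads and 0 (EOF) are fine */
}

static int fd_send(void *io_context, const uint8_t *buf, uint32_t len)
{
    int fd = *(const int *) io_context;
    ssize_t n = write(fd, buf, len); /* write() sets errno on failure */
    return (int) n;
}

static int use_custom_io(struct s2n_connection *conn, int *fd)
{
    if (s2n_connection_set_recv_ctx(conn, fd) != S2N_SUCCESS
        || s2n_connection_set_send_ctx(conn, fd) != S2N_SUCCESS
        || s2n_connection_set_recv_cb(conn, fd_recv) != S2N_SUCCESS
        || s2n_connection_set_send_cb(conn, fd_send) != S2N_SUCCESS) {
        return S2N_FAILURE;
    }
    return S2N_SUCCESS;
}
```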
+/** + * Change the behavior of s2n-tls when sending data to prefer high throughput. Connections preferring throughput will use + * large record sizes that minimize overhead. + * + * @param conn The connection object being updated + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_prefer_throughput(struct s2n_connection *conn); + +/** + * Change the behavior of s2n-tls when sending data to prefer low latency. Connections preferring low latency will be encrypted + * using small record sizes that can be decrypted sooner by the recipient. + * + * @param conn The connection object being updated + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_prefer_low_latency(struct s2n_connection *conn); + +/** + * Provides a smooth transition from s2n_connection_prefer_low_latency() to s2n_connection_prefer_throughput(). + * + * @param conn The connection object being updated + * @param resize_threshold The number of bytes to send before changing the record size + * @param timeout_threshold Reset record size back to a single segment after threshold seconds of inactivity + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_dynamic_record_threshold(struct s2n_connection *conn, uint32_t resize_threshold, uint16_t timeout_threshold); -/* If you don't want to use the configuration wide callback, you can set this per connection and it will be honored. */ +/** + * Sets the callback to use for verifying that a hostname from an X.509 certificate is trusted, for this connection only. By default, + * no certificate will be trusted. To override this behavior, set this callback. See s2n_verify_host_fn() + * for details. + * + * This per-connection callback takes precedence over any configuration wide callback; use it when different + * connections sharing the same config need different hostname verification behavior. + * + * @note If you don't want to use the configuration wide callback, you can set this per connection and it will be honored. + * + * @param config A pointer to the s2n_connection object being updated + * @param host_fn A pointer to a callback function that s2n will invoke in order to verify the hostname of an X.509 certificate + * @param data Opaque pointer to data that the verify host function will be invoked with + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_verify_host_callback(struct s2n_connection *config, s2n_verify_host_fn host_fn, void *data); +/** + * Used to opt-out of s2n-tls's built-in blinding. Blinding is a + * mitigation against timing side-channels which in some cases can leak information + * about encrypted data. By default s2n-tls will cause a thread to sleep between 10 and + * 30 seconds whenever tampering is detected. + * + * Setting the S2N_SELF_SERVICE_BLINDING option with s2n_connection_set_blinding() + * turns off this behavior. This is useful for applications that are handling many connections + * in a single thread. In that case, if s2n_recv() or s2n_negotiate() returns an error, + * self-service applications should call s2n_connection_get_delay() and pause + * activity on the connection for the specified number of nanoseconds before calling + * close() or shutdown(); see the sketch following this enum. + */ typedef enum { S2N_BUILT_IN_BLINDING, S2N_SELF_SERVICE_BLINDING } s2n_blinding;
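For event-loop servers, self-service blinding replaces the built-in sleep; `s2n_connection_set_blinding()` and `s2n_connection_get_delay()` are declared just below. A sketch of the error path, with `schedule_close_after()` standing in for a hypothetical application timer:

```c
#include <stdint.h>
#include <s2n.h>

/* Hypothetical event-loop helper: close `conn`'s transport after `ns` nanoseconds. */
extern void schedule_close_after(struct s2n_connection *conn, uint64_t ns);

/* Call this when s2n_recv() or s2n_negotiate() returns an error.
 * Assumes the connection was configured earlier with:
 *   s2n_connection_set_blinding(conn, S2N_SELF_SERVICE_BLINDING); */
static void handle_io_error(struct s2n_connection *conn)
{
    /* Do not close() immediately: honor the blinding delay first. */
    uint64_t delay_ns = s2n_connection_get_delay(conn);
    schedule_close_after(conn, delay_ns);
}
```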
+ +/** + * Used to configure s2n-tls to either use built-in blinding (set blinding to S2N_BUILT_IN_BLINDING) or + * self-service blinding (set blinding to S2N_SELF_SERVICE_BLINDING). + * + * @param conn The connection object being updated + * @param blinding The desired blinding mode for the connection + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_blinding(struct s2n_connection *conn, s2n_blinding blinding); + +/** + * Query the connection object for the configured blinding delay. + * @param conn The connection object being queried + * @returns The number of nanoseconds an application using self-service blinding should pause before calling close() or shutdown(). + */ S2N_API extern uint64_t s2n_connection_get_delay(struct s2n_connection *conn); +/** + * Sets the cipher preference override for the s2n_connection. Calling this function is not necessary + * unless you want to set the cipher preferences on the connection to something different than what is in the s2n_config. + * + * @param conn The connection object being updated + * @param version The human readable string representation of the security policy version. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_cipher_preferences(struct s2n_connection *conn, const char *version); @@ -429,23 +1625,83 @@ extern int s2n_connection_set_cipher_preferences(struct s2n_connection *conn, co * @param conn The connection object being updated * @param protocol A pointer to a slice of bytes * @param protocol_len The length of bytes that should be read from `protocol`. Note: this value cannot be 0, otherwise an error will be returned. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_append_protocol_preference(struct s2n_connection *conn, const uint8_t *protocol, uint8_t protocol_len); +/** + * Sets the protocol preference override for the s2n_connection. Calling this function is not necessary unless you want + * to set the protocol preferences on the connection to something different than what is in the s2n_config. + * + * @param conn The connection object being updated + * @param protocols A pointer to an array of protocol strings + * @param protocol_count The number of protocols contained in protocols + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_protocol_preferences(struct s2n_connection *conn, const char * const *protocols, int protocol_count); + +/** + * Sets the server name for the connection. + * + * @note In the future, this can be used by clients who wish to use the TLS "Server Name Indication" + * extension. At present, client functionality is disabled. + * + * @param conn The connection object being updated + * @param server_name A pointer to a string containing the desired server name + * @warning `server_name` must be a NULL terminated string. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_set_server_name(struct s2n_connection *conn, const char *server_name); + +/** + * Query the connection for the selected server name.
+ * + * This can be used by a server to determine which server name the client is using. This function returns the first ServerName entry + * in the ServerNameList sent by the client. Subsequent entries are not returned. + * + * @param conn The connection object being queried + * @returns The server name associated with a connection, or NULL if none is found. + */ S2N_API extern const char *s2n_get_server_name(struct s2n_connection *conn); + +/** + * Query the connection for the selected application protocol. + * + * @param conn The connection object being queried + * @returns The negotiated application protocol for a `s2n_connection`. In the event of no protocol being negotiated, NULL is returned. + */ S2N_API extern const char *s2n_get_application_protocol(struct s2n_connection *conn); + +/** + * Query the connection for a buffer containing the OCSP response. + * + * @param conn The connection object being queried + * @param length A pointer that is set to the OCSP response buffer's size + * @returns A pointer to the OCSP response sent by a server during the handshake. If no status response is received, NULL is returned. + */ S2N_API extern const uint8_t *s2n_connection_get_ocsp_response(struct s2n_connection *conn, uint32_t *length); + +/** + * Query the connection for a buffer containing the Certificate Transparency response. + * + * @param conn The connection object being queried + * @param length A pointer that is set to the certificate transparency response buffer's size + * @returns A pointer to the certificate transparency response buffer. + */ S2N_API extern const uint8_t *s2n_connection_get_sct_list(struct s2n_connection *conn, uint32_t *length); +/** + * Used in non-blocking mode to indicate in which direction s2n-tls became blocked on I/O before it + * returned control to the caller. This allows an application to avoid retrying s2n-tls operations + * until I/O is possible in that direction. + */ typedef enum { S2N_NOT_BLOCKED = 0, S2N_BLOCKED_ON_READ, @@ -453,40 +1709,264 @@ typedef enum { S2N_BLOCKED_ON_APPLICATION_INPUT, S2N_BLOCKED_ON_EARLY_DATA, } s2n_blocked_status; + +/** + * Performs the initial "handshake" phase of a TLS connection and must be called before any s2n_recv() or s2n_send() calls. A retry-loop sketch follows s2n_send() below. + * + * @param conn A pointer to the s2n_connection object + * @param blocked A pointer which will be set to the blocked status. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_negotiate(struct s2n_connection *conn, s2n_blocked_status *blocked); + +/** + * Writes and encrypts `size` bytes of `buf` data to the associated connection. s2n_send() will return the number of bytes + * written, and may indicate a partial write. + * + * @note Partial writes are possible not just for non-blocking I/O, but also for connections aborted while active. + * @note Unlike OpenSSL, repeated calls to s2n_send() should not duplicate the original parameters, but should + * update `buf` and `size` per the indication of size written. For example: + * ```c + * s2n_blocked_status blocked; + * int written = 0; + * char data[10]; + * do { + * int w = s2n_send(conn, data + written, 10 - written, &blocked); + * if (w < 0) { + * break; + * } + * written += w; + * } while (blocked != S2N_NOT_BLOCKED); + * ``` + * + * @param conn A pointer to the s2n_connection object + * @param buf A pointer to a buffer that s2n will write data from + * @param size The size of buf + * @param blocked A pointer which will be set to the blocked status, as in s2n_negotiate() + * @returns The number of bytes written, and may indicate a partial write + */ S2N_API extern ssize_t s2n_send(struct s2n_connection *conn, const void *buf, ssize_t size, s2n_blocked_status *blocked);
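Putting the blocked-status enum and s2n_negotiate() together, the canonical non-blocking handshake loop looks roughly like this sketch, where `wait_for_io()` is a hypothetical poll/select wrapper and the blocked status is used as a simplified proxy for the S2N_ERR_T_BLOCKED error type described earlier:

```c
#include <s2n.h>

/* Hypothetical: block until the connection's fd is readable/writable as requested. */
extern int wait_for_io(struct s2n_connection *conn, s2n_blocked_status blocked);

static int do_handshake(struct s2n_connection *conn)
{
    s2n_blocked_status blocked = S2N_NOT_BLOCKED;

    while (s2n_negotiate(conn, &blocked) != S2N_SUCCESS) {
        if (blocked == S2N_NOT_BLOCKED) {
            return S2N_FAILURE; /* a real error, not pending I/O */
        }
        if (wait_for_io(conn, blocked) != 0) {
            return S2N_FAILURE;
        }
        /* retry s2n_negotiate() with the same arguments */
    }
    return S2N_SUCCESS;
}
```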
+ +/** + * Works in the same way as s2n_sendv_with_offset() except that the latter's `offs` parameter is implicitly assumed to be 0. + * Therefore in the partial write case, the caller would have to make sure that `bufs` and `count` fields are modified in a way that takes + * the partial writes into account. + * + * @param conn A pointer to the s2n_connection object + * @param bufs A pointer to a vector of buffers that s2n will write data from. + * @param count The number of buffers in `bufs` + * @param blocked A pointer which will be set to the blocked status, as in s2n_negotiate() + * @returns The number of bytes written, and may indicate a partial write. + */ S2N_API extern ssize_t s2n_sendv(struct s2n_connection *conn, const struct iovec *bufs, ssize_t count, s2n_blocked_status *blocked); + +/** + * Works in the same way as s2n_send() except that it accepts vectorized buffers. Will return the number of bytes written, and may indicate a partial write. + * + * @note Partial writes are possible not just for non-blocking I/O, but also for connections aborted while active. + * + * @note Unlike OpenSSL, repeated calls to s2n_sendv_with_offset() should not duplicate the original parameters, but should update `bufs` and `count` per the indication of size written. For example: + * + * ```c + * s2n_blocked_status blocked; + * int written = 0; + * char data[10]; + * struct iovec iov[1]; + * iov[0].iov_base = data; + * iov[0].iov_len = 10; + * do { + * int w = s2n_sendv_with_offset(conn, iov, 1, written, &blocked); + * if (w < 0) { + * break; + * } + * written += w; + * } while (blocked != S2N_NOT_BLOCKED); + * ``` + * + * @param conn A pointer to the s2n_connection object + * @param bufs A pointer to a vector of buffers that s2n will write data from. + * @param count The number of buffers in `bufs` + * @param offs The write cursor offset. This should be updated as data is written. See the example code. + * @param blocked A pointer which will be set to the blocked status, as in s2n_negotiate() + * @returns The number of bytes written, and may indicate a partial write. + */ S2N_API extern ssize_t s2n_sendv_with_offset(struct s2n_connection *conn, const struct iovec *bufs, ssize_t count, ssize_t offs, s2n_blocked_status *blocked); + +/** + * Decrypts and reads up to `size` bytes into `buf` from the associated + * connection. + * + * @note Unlike OpenSSL, repeated calls to `s2n_recv` should not duplicate the original parameters, but should update `buf` and `size` per the indication of size read.
For example: + * ```c + * s2n_blocked_status blocked; + * int bytes_read = 0; + * char data[10]; + * do { + * int r = s2n_recv(conn, data + bytes_read, 10 - bytes_read, &blocked); + * if (r < 0) { + * break; + * } + * bytes_read += r; + * } while (blocked != S2N_NOT_BLOCKED); + * ``` + * + * @param conn A pointer to the s2n_connection object + * @param buf A pointer to a buffer that s2n will place read data into. + * @param size Size of `buf` + * @param blocked A pointer which will be set to the blocked status, as in s2n_negotiate() + * @returns The number of bytes read. 0 if the connection was shut down by the peer. + */ S2N_API extern ssize_t s2n_recv(struct s2n_connection *conn, void *buf, ssize_t size, s2n_blocked_status *blocked); + +/** + * Allows users of s2n-tls to peek inside the data buffer of an s2n-tls connection to see if there is more data to be read without actually reading it. + * + * This is useful when using select() on the underlying s2n-tls file descriptor with a message based application layer protocol. As a single call + * to s2n_recv may read all data off the underlying file descriptor, select() will be unable to tell you if there is more application data + * ready for processing already loaded into the s2n-tls buffer. + * + * @note s2n_peek() can then be used to determine if s2n_recv() needs to be called before more data comes in on the raw fd + * @param conn A pointer to the s2n_connection object + * @returns The number of bytes that can be read from the connection + */ S2N_API extern uint32_t s2n_peek(struct s2n_connection *conn); +/** + * Wipes and releases buffers and memory allocated during the TLS handshake. + * + * @note This function should be called after the handshake is successfully negotiated and logging or recording of handshake data is complete. + * + * @param conn A pointer to the s2n_connection object + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_free_handshake(struct s2n_connection *conn); + +/** + * Wipes and frees the `in` and `out` buffers associated with a connection. + * + * @note This function may be called when a connection is + * in keep-alive or idle state to reduce memory overhead of long lived connections. + * + * @param conn A pointer to the s2n_connection object + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_release_buffers(struct s2n_connection *conn); + +/** + * Wipes an existing connection and allows it to be reused. Erases all data associated with a connection including + * pending reads. + * + * @note This function should be called after all I/O is completed and s2n_shutdown has been called. + * @note Reusing the same connection handle(s) is more performant than repeatedly calling s2n_connection_new() and s2n_connection_free(). + * + * @param conn A pointer to the s2n_connection object + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_wipe(struct s2n_connection *conn); + +/** + * Frees the memory associated with an s2n_connection + * handle. The handle is considered invalid after `s2n_connection_free` is used. + * s2n_connection_wipe() does not need to be called prior to this function. `s2n_connection_free` performs its own wipe + * of sensitive data. + * + * @param conn A pointer to the s2n_connection object + * @returns 0 on success. -1 on failure + */ S2N_API extern int s2n_connection_free(struct s2n_connection *conn); + +/** + * Attempts a closure at the TLS layer. Does not close the underlying transport. This call may block in either direction. + * + * Unlike other TLS implementations, `s2n_shutdown` attempts a graceful shutdown by default. It will not return with success unless a close_notify alert is successfully + * sent and received. As a result, `s2n_shutdown` may fail when interacting with a non-conformant TLS implementation. + * + * Once `s2n_shutdown` is complete: + * * The s2n_connection handle cannot be used for reading or writing. + * * The underlying transport can be closed. Most likely via `close()`. + * * The s2n_connection handle can be freed via s2n_connection_free() or reused via s2n_connection_wipe() + * + * @param conn A pointer to the s2n_connection object + * @param blocked A pointer which will be set to the blocked status, as in s2n_negotiate() + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_shutdown(struct s2n_connection *conn, s2n_blocked_status *blocked);
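Combining s2n_shutdown() with the wipe/free lifecycle above, a sketch of graceful teardown that recycles the handle for the next client; `wait_for_io()` is the same hypothetical helper as in the handshake sketch earlier.

```c
#include <unistd.h>
#include <s2n.h>

extern int wait_for_io(struct s2n_connection *conn, s2n_blocked_status blocked);

/* Gracefully close the TLS session, then recycle the connection handle. */
static int close_and_recycle(struct s2n_connection *conn, int fd)
{
    s2n_blocked_status blocked = S2N_NOT_BLOCKED;

    while (s2n_shutdown(conn, &blocked) != S2N_SUCCESS) {
        if (blocked == S2N_NOT_BLOCKED || wait_for_io(conn, blocked) != 0) {
            break; /* peer is non-conformant or gone; give up on close_notify */
        }
    }
    close(fd); /* the underlying transport is closed separately */
    return s2n_connection_wipe(conn); /* ready for reuse instead of free/new */
}
```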
+/** + * Used to declare what type of client certificate authentication to use. + * + * Currently the default for s2n-tls is for neither the server side nor the client side to use Client (aka Mutual) authentication. + */ typedef enum { S2N_CERT_AUTH_NONE, S2N_CERT_AUTH_REQUIRED, S2N_CERT_AUTH_OPTIONAL } s2n_cert_auth_type; +/** + * Gets the Client Certificate authentication method the s2n_config object is using. + * + * @param config A pointer to a s2n_config object + * @param client_auth_type A pointer to a client auth policy. This will be updated to the s2n_config value. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_get_client_auth_type(struct s2n_config *config, s2n_cert_auth_type *client_auth_type); + +/** + * Sets whether or not a Client Certificate should be required to complete the TLS Connection. + * + * If this is set to `S2N_CERT_AUTH_OPTIONAL` the server will request a client certificate but allow the client to not provide one. + * Rejecting a client certificate when using `S2N_CERT_AUTH_OPTIONAL` will terminate the handshake. + * + * @param config A pointer to a s2n_config object + * @param client_auth_type The client auth policy for the connection + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_config_set_client_auth_type(struct s2n_config *config, s2n_cert_auth_type client_auth_type); + +/** + * Gets the Client Certificate authentication method the s2n_connection object is using. + * + * @param conn A pointer to the s2n_connection object + * @param client_auth_type A pointer to a client auth policy. This will be updated to the s2n_connection value. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_get_client_auth_type(struct s2n_connection *conn, s2n_cert_auth_type *client_auth_type); + +/** + * Sets whether or not a Client Certificate should be required to complete the TLS Connection. + * + * If this is set to `S2N_CERT_AUTH_OPTIONAL` the server will request a client certificate but allow the client to not provide one. + * Rejecting a client certificate when using `S2N_CERT_AUTH_OPTIONAL` will terminate the handshake. + * + * @param conn A pointer to the s2n_connection object + * @param client_auth_type The client auth policy for the connection + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_client_auth_type(struct s2n_connection *conn, s2n_cert_auth_type client_auth_type);
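A short sketch of turning on mutual TLS with the setters above; whether the client certificate is mandatory is the only difference between the two modes shown.

```c
#include <s2n.h>

/* Require a client certificate on every connection using this config. */
static int require_mutual_tls(struct s2n_config *config)
{
    return s2n_config_set_client_auth_type(config, S2N_CERT_AUTH_REQUIRED);
}

/* Request, but do not require, a client certificate for one connection. */
static int request_client_cert(struct s2n_connection *conn)
{
    return s2n_connection_set_client_auth_type(conn, S2N_CERT_AUTH_OPTIONAL);
}
```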
+ +/** + * Gets the client certificate chain and places it in the `der_cert_chain_out` buffer. `cert_chain_len` is updated + * to match the size of the chain buffer. + * + * @warning The buffers share a lifetime with the s2n_connection object. + * + * @param conn A pointer to the s2n_connection object + * @param der_cert_chain_out A uint8_t pointer. This will be updated to point to the client certificate chain. + * @param cert_chain_len A pointer to a uint32_t. This will be updated to match the size of the buffer `der_cert_chain_out` points to. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_get_client_cert_chain(struct s2n_connection *conn, uint8_t **der_cert_chain_out, uint32_t *cert_chain_len); @@ -495,6 +1975,7 @@ extern int s2n_connection_get_client_cert_chain(struct s2n_connection *conn, uin * * @param config A pointer to the config object. * @param num The number of session tickets that will be sent. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_initial_ticket_count(struct s2n_config *config, uint8_t num); @@ -504,6 +1985,7 @@ extern int s2n_config_set_initial_ticket_count(struct s2n_config *config, uint8_ * * @param conn A pointer to the connection object. * @param num The number of additional session tickets to send. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_add_new_tickets_to_send(struct s2n_connection *conn, uint8_t num); @@ -518,6 +2000,7 @@ extern int s2n_connection_add_new_tickets_to_send(struct s2n_connection *conn, u * * @param conn A pointer to the connection object. * @param num The number of additional session tickets sent. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_get_tickets_sent(struct s2n_connection *conn, uint16_t *num); @@ -528,6 +2011,7 @@ extern int s2n_connection_get_tickets_sent(struct s2n_connection *conn, uint16_t * * @param conn A pointer to the connection object. * @param lifetime_in_secs Lifetime of keying material in seconds. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_server_keying_material_lifetime(struct s2n_connection *conn, uint32_t lifetime_in_secs); @@ -595,25 +2079,106 @@ extern int s2n_session_ticket_get_data(struct s2n_session_ticket *ticket, size_t S2N_API extern int s2n_session_ticket_get_lifetime(struct s2n_session_ticket *ticket, uint32_t *session_lifetime); +/** + * De-serializes the session state and updates the connection accordingly. + * + * @param conn A pointer to the s2n_connection object + * @param session A pointer to a buffer of size `length` + * @param length The size of the `session` buffer + * + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ S2N_API extern int s2n_connection_set_session(struct s2n_connection *conn, const uint8_t *session, size_t length); + +/** + * Serializes the session state from the connection, copies it into the `session` buffer, and returns the number of copied bytes. + * + * The output of this function depends on whether session ids or session tickets are being used for resumption. + * + * @note This is for < TLS 1.3 session resumption. + * + * @param conn A pointer to the s2n_connection object + * @param session A pointer to a buffer of size `max_length` + * @param max_length The size of the `session` buffer + * + * @returns The number of copied bytes + */ S2N_API extern int s2n_connection_get_session(struct s2n_connection *conn, uint8_t *session, size_t max_length);
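On the client side, the serialization APIs above pair as in this sketch, which stores one session blob in memory; `s2n_connection_get_session_length()`, declared just below, is used to size the buffer.

```c
#include <stdint.h>
#include <stdlib.h>
#include <s2n.h>

static uint8_t *saved_session = NULL;
static int saved_len = 0;

/* Save the serialized session state after a completed handshake... */
static int save_session(struct s2n_connection *conn)
{
    int len = s2n_connection_get_session_length(conn);
    if (len <= 0) {
        return S2N_FAILURE;
    }
    uint8_t *buf = malloc((size_t) len);
    if (buf == NULL) {
        return S2N_FAILURE;
    }
    saved_len = s2n_connection_get_session(conn, buf, (size_t) len);
    if (saved_len <= 0) {
        free(buf);
        return S2N_FAILURE;
    }
    free(saved_session);
    saved_session = buf;
    return S2N_SUCCESS;
}

/* ...and restore it on a new connection before calling s2n_negotiate(). */
static int resume_session(struct s2n_connection *conn)
{
    if (saved_session == NULL) {
        return S2N_FAILURE;
    }
    return s2n_connection_set_session(conn, saved_session, (size_t) saved_len);
}
```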
+ +/** + * Get the lifetime hint for a session. + * + * @param conn A pointer to the s2n_connection object + * + * @returns The session ticket lifetime hint in seconds from the server or -1 when a session ticket was not used for resumption. + */ S2N_API extern int s2n_connection_get_session_ticket_lifetime_hint(struct s2n_connection *conn); + +/** + * Use this to query the serialized session state size before copying it into a buffer. + * + * @param conn A pointer to the s2n_connection object + * + * @returns The number of bytes needed to store the serialized session state + */ S2N_API extern int s2n_connection_get_session_length(struct s2n_connection *conn); + +/** + * Gets the latest session id's length from the connection. + * + * Use this to query the session id size before copying it into a buffer. + * + * @param conn A pointer to the s2n_connection object + * + * @returns The latest session id length from the connection. Session id length will be 0 for TLS versions >= TLS1.3 as stateful session resumption has not yet been implemented in TLS1.3. + */ S2N_API extern int s2n_connection_get_session_id_length(struct s2n_connection *conn); + +/** + * Gets the latest session id from the connection, copies it into the `session_id` buffer, and returns the number of copied bytes. + * + * The session id may change between s2n receiving the ClientHello and sending the ServerHello, but this function will always describe the latest session id. + * + * See s2n_client_hello_get_session_id() to get the session id as it was sent by the client in the ClientHello message. + * + * @param conn A pointer to the s2n_connection object + * @param session_id A pointer to a buffer of size `max_length` + * @param max_length The size of the `session_id` buffer + * + * @returns The number of copied bytes. + */ S2N_API extern int s2n_connection_get_session_id(struct s2n_connection *conn, uint8_t *session_id, size_t max_length); + +/** + * Check if the connection was resumed from an earlier handshake. + * + * @param conn A pointer to the s2n_connection object + * + * @returns 1 if the handshake was abbreviated, otherwise 0 + */ S2N_API extern int s2n_connection_is_session_resumed(struct s2n_connection *conn); + +/** + * Check if the connection is OCSP stapled. + * + * @param conn A pointer to the s2n_connection object + * + * @returns 1 if an OCSP response was sent (if the connection is in S2N_SERVER mode) or received (if the connection is in S2N_CLIENT mode) during the handshake, otherwise it returns 0.
+ */ S2N_API extern int s2n_connection_is_ocsp_stapled(struct s2n_connection *conn); -/* TLS Signature Algorithms - RFC 5246 7.4.1.4.1 */ -/* https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-16 */ +/** + * TLS Signature Algorithms - RFC 5246 7.4.1.4.1 + * https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-16 + */ typedef enum { S2N_TLS_SIGNATURE_ANONYMOUS = 0, S2N_TLS_SIGNATURE_RSA = 1, @@ -624,8 +2189,9 @@ typedef enum { S2N_TLS_SIGNATURE_RSA_PSS_PSS } s2n_tls_signature_algorithm; -/* TLS Hash Algorithm - https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 */ -/* https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-18 */ +/** TLS Hash Algorithms - https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 + * https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-18 + */ typedef enum { S2N_TLS_HASH_NONE = 0, S2N_TLS_HASH_MD5 = 1, @@ -639,23 +2205,70 @@ typedef enum { S2N_TLS_HASH_MD5_SHA1 = 224 } s2n_tls_hash_algorithm; +/** + * Get the connection's selected signature algorithm. + * + * @param conn A pointer to the s2n_connection object + * @param chosen_alg A pointer to a s2n_tls_signature_algorithm object. This is an output parameter. + * + * @returns S2N_SUCCESS on success. S2N_FAILURE if bad parameters are received. + */ S2N_API extern int s2n_connection_get_selected_signature_algorithm(struct s2n_connection *conn, s2n_tls_signature_algorithm *chosen_alg); + +/** + * Get the connection's selected digest algorithm. + * + * @param conn A pointer to the s2n_connection object + * @param chosen_alg A pointer to a s2n_tls_hash_algorithm object. This is an output parameter. + * + * @returns S2N_SUCCESS on success. S2N_FAILURE if bad parameters are received. + */ S2N_API extern int s2n_connection_get_selected_digest_algorithm(struct s2n_connection *conn, s2n_tls_hash_algorithm *chosen_alg); + +/** + * Get the client certificate's signature algorithm. + * + * @param conn A pointer to the s2n_connection object + * @param chosen_alg A pointer to a s2n_tls_signature_algorithm object. This is an output parameter. + * + * @returns S2N_SUCCESS on success. S2N_FAILURE if bad parameters are received. + */ S2N_API extern int s2n_connection_get_selected_client_cert_signature_algorithm(struct s2n_connection *conn, s2n_tls_signature_algorithm *chosen_alg); + +/** + * Get the client certificate's digest algorithm. + * + * @param conn A pointer to the s2n_connection object + * @param chosen_alg A pointer to a s2n_tls_hash_algorithm object. This is an output parameter. + * + * @returns S2N_SUCCESS on success. S2N_FAILURE if bad parameters are received. + */ S2N_API extern int s2n_connection_get_selected_client_cert_digest_algorithm(struct s2n_connection *conn, s2n_tls_hash_algorithm *chosen_alg); +/** + * Get the certificate used during the TLS handshake + * + * - If `conn` is a server connection, the certificate selected will depend on the + * ServerName sent by the client and supported ciphers. + * - If `conn` is a client connection, the certificate sent in response to a CertificateRequest + * message is returned. Currently s2n-tls supports loading only one certificate in client mode. Note that + * not all TLS endpoints will request a certificate. 
+ * + * @param conn A pointer to the s2n_connection object + * + * @returns A pointer to the selected certificate, or NULL if the certificate selection phase of the handshake has not completed or if a certificate was not requested by the peer + */ S2N_API extern struct s2n_cert_chain_and_key *s2n_connection_get_selected_cert(struct s2n_connection *conn); /** - * Returns the length of the s2n certificate chain `chain_and_key`. - * * @param chain_and_key A pointer to the s2n_cert_chain_and_key object being read. * @param cert_length This return value represents the length of the s2n certificate chain `chain_and_key`. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_cert_chain_get_length(const struct s2n_cert_chain_and_key *chain_and_key, uint32_t *cert_length); @@ -717,7 +2330,7 @@ extern int s2n_cert_get_der(const struct s2n_cert *cert, const uint8_t **out_cer * `s2n_cert_chain_and_key_free` API. * * @param conn A pointer to the s2n_connection object being read. - * @param s2n_cert_chain_and_key The returned validated peer certificate chain `cert_chain` retrieved from the s2n connection. + * @param cert_chain The returned validated peer certificate chain `cert_chain` retrieved from the s2n connection. */ S2N_API extern int s2n_connection_get_peer_cert_chain(const struct s2n_connection *conn, struct s2n_cert_chain_and_key *cert_chain); @@ -770,12 +2383,17 @@ extern int s2n_cert_get_utf8_string_from_extension_data_length(const uint8_t *ex S2N_API extern int s2n_cert_get_utf8_string_from_extension_data(const uint8_t *extension_data, uint32_t extension_len, uint8_t *out_data, uint32_t *out_len); -/* Pre-shared key (PSK) Hash Algorithm - RFC 8446 Section-2.2 */ +/** + * Pre-shared key (PSK) Hash Algorithm - RFC 8446 Section-2.2 + */ typedef enum { S2N_PSK_HMAC_SHA256, S2N_PSK_HMAC_SHA384, } s2n_psk_hmac; +/** + * Opaque pre-shared key handle + */ struct s2n_psk; /** @@ -784,7 +2402,7 @@ struct s2n_psk; * * Use `s2n_psk_free` to free the memory allocated to the s2n external PSK object created by this API. * - * @return struct s2n_psk* Returns a pointer to the newly created external PSK object. + * @returns struct s2n_psk* Returns a pointer to the newly created external PSK object. */ S2N_API struct s2n_psk* s2n_external_psk_new(void); @@ -933,7 +2551,7 @@ struct s2n_offered_psk; * * Use `s2n_offered_psk_free` to free the memory allocated to the s2n offered PSK object created by this API. * - * @return struct s2n_offered_psk* Returns a pointer to the newly created offered PSK object. + * @returns struct s2n_offered_psk* Returns a pointer to the newly created offered PSK object. */ S2N_API struct s2n_offered_psk* s2n_offered_psk_new(void); @@ -968,7 +2586,7 @@ struct s2n_offered_psk_list; * After the completion of `s2n_psk_selection_callback` this pointer is invalid. * * @param psk_list A pointer to the offered PSK list being read. - * @return bool A boolean value representing whether an offered psk object is present next in line in the offered PSK list. + * @returns bool A boolean value representing whether an offered psk object is present next in line in the offered PSK list. */ S2N_API bool s2n_offered_psk_list_has_next(struct s2n_offered_psk_list *psk_list); @@ -1036,20 +2654,76 @@ typedef int (*s2n_psk_selection_callback)(struct s2n_connection *conn, void *con S2N_API int s2n_config_set_psk_selection_callback(struct s2n_config *config, s2n_psk_selection_callback cb, void *context); +/** + * Get the number of bytes the connection has received.
+ * + * @param conn A pointer to the connection + * @returns The number of bytes received by s2n-tls "on the wire" + */ S2N_API extern uint64_t s2n_connection_get_wire_bytes_in(struct s2n_connection *conn); + +/** + * Get the number of bytes the connection has transmitted out. + * + * @param conn A pointer to the connection + * @returns The number of bytes transmitted out by s2n-tls "on the wire" + */ S2N_API extern uint64_t s2n_connection_get_wire_bytes_out(struct s2n_connection *conn); + +/** + * Access the protocol version supported by the client of the connection. + * + * @param conn A pointer to the connection + * @returns The protocol version number supported by the client + */ S2N_API extern int s2n_connection_get_client_protocol_version(struct s2n_connection *conn); + +/** + * Access the protocol version supported by the server of the connection. + * + * @param conn A pointer to the connection + * @returns The protocol version number supported by the server + */ S2N_API extern int s2n_connection_get_server_protocol_version(struct s2n_connection *conn); + +/** + * Access the protocol version selected for the connection. + * + * @param conn A pointer to the connection + * @returns The protocol version number actually used by s2n-tls for the connection + */ S2N_API extern int s2n_connection_get_actual_protocol_version(struct s2n_connection *conn); + +/** + * Access the client hello protocol version for the connection. + * + * @param conn A pointer to the connection + * @returns The protocol version used to send the initial client hello message. + */ S2N_API extern int s2n_connection_get_client_hello_version(struct s2n_connection *conn); + +/** + * Check if Client Auth was used for a connection. + * + * @param conn A pointer to the connection + * @returns 1 if the handshake completed and Client Auth was negotiated during the + * handshake. + */ S2N_API extern int s2n_connection_client_cert_used(struct s2n_connection *conn); + +/** + * A function that provides a human readable string of the cipher suite that was chosen + * for a connection. + * @param conn A pointer to the connection + * @returns A string indicating the cipher suite negotiated by s2n in OpenSSL format. + */ S2N_API extern const char *s2n_connection_get_cipher(struct s2n_connection *conn); @@ -1066,28 +2740,93 @@ extern const char *s2n_connection_get_cipher(struct s2n_connection *conn); * @param conn A pointer to the connection being read * @param first A pointer to a single byte, which will be updated with the first byte in the registered IANA value. * @param second A pointer to a single byte, which will be updated with the second byte in the registered IANA value. - * @return A POSIX error signal. If an error was returned, the values contained in `first` and `second` should be considered invalid. + * @returns A POSIX error signal. If an error was returned, the values contained in `first` and `second` should be considered invalid. */ S2N_API extern int s2n_connection_get_cipher_iana_value(struct s2n_connection *conn, uint8_t *first, uint8_t *second); +/** + * Function to check if the cipher used by the current connection is supported by the given + * cipher preferences. + * @param conn A pointer to the s2n connection + * @param version A string representing the security policy to check against. + * @returns 1 if the connection satisfies the cipher suite. 0 if the connection does not satisfy the cipher suite. -1 if there is an error. + */ S2N_API extern int s2n_connection_is_valid_for_cipher_preferences(struct s2n_connection *conn, const char *version);
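The accessors above make a compact post-handshake log line. A short sketch (s2n_connection_get_curve() is declared just below):

```c
#include <stdio.h>
#include <s2n.h>

/* Print a one-line summary of the negotiated connection parameters. */
static void log_connection(struct s2n_connection *conn)
{
    printf("version=%d cipher=%s curve=%s client_cert=%s\n",
           s2n_connection_get_actual_protocol_version(conn),
           s2n_connection_get_cipher(conn),
           s2n_connection_get_curve(conn),
           s2n_connection_client_cert_used(conn) ? "yes" : "no");
}
```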
+/**
+ * Function to check if the cipher used by the current connection is supported by the
+ * current cipher preferences.
+ * @param conn A pointer to the s2n connection
+ * @param version A string representing the security policy to check against.
+ * @returns 1 if the connection satisfies the cipher suite. 0 if the connection does not satisfy the cipher suite. -1 if there is an error.
+ */
 S2N_API
 extern int s2n_connection_is_valid_for_cipher_preferences(struct s2n_connection *conn, const char *version);
+
+/**
+ * Function to get the human readable elliptic curve name for the connection.
+ *
+ * @param conn A pointer to the s2n connection
+ * @returns A string indicating the elliptic curve used during ECDHE key exchange. The string "NONE" is returned if no curve was used.
+ */
 S2N_API
 extern const char *s2n_connection_get_curve(struct s2n_connection *conn);
+
+/**
+ * Function to get the human readable KEM name for the connection.
+ *
+ * @param conn A pointer to the s2n connection
+ * @returns A human readable string for the KEM. If no KEM is configured, "NONE" is returned.
+ */
 S2N_API
 extern const char *s2n_connection_get_kem_name(struct s2n_connection *conn);
+
+/**
+ * Function to get the human readable KEM group name for the connection.
+ *
+ * @param conn A pointer to the s2n connection
+ * @returns A human readable string for the KEM group. If the connection is < TLS1.3 or no KEM group is configured, "NONE" is returned.
+ */
 S2N_API
 extern const char *s2n_connection_get_kem_group_name(struct s2n_connection *conn);
+
+/**
+ * Function to get the alert that caused a connection to close. s2n-tls considers all
+ * TLS alerts fatal and shuts down a connection whenever one is received.
+ *
+ * @param conn A pointer to the s2n connection
+ * @returns The TLS alert code that caused a connection to be shut down
+ */
 S2N_API
 extern int s2n_connection_get_alert(struct s2n_connection *conn);
+
+/**
+ * Function to return the last TLS handshake type that was processed. The returned format is a human readable string.
+ *
+ * @param conn A pointer to the s2n connection
+ * @returns A human-readable handshake type name, e.g. "NEGOTIATED|FULL_HANDSHAKE|PERFECT_FORWARD_SECRECY"
+ */
 S2N_API
 extern const char *s2n_connection_get_handshake_type_name(struct s2n_connection *conn);
+
+/**
+ * Function to return the last TLS message that was processed. The returned format is a human readable string.
+ * @param conn A pointer to the s2n connection
+ * @returns The last message name in the TLS state machine, e.g. "SERVER_HELLO", "APPLICATION_DATA".
+ */
 S2N_API
 extern const char *s2n_connection_get_last_message_name(struct s2n_connection *conn);
 
+/**
+ * Opaque async private key operation handle
+ */
 struct s2n_async_pkey_op;
+
+/**
+ * Sets whether or not a connection should enforce strict signature validation during the
+ * `s2n_async_pkey_op_apply` call.
+ *
+ * `mode` can take the following values:
+ * - `S2N_ASYNC_PKEY_VALIDATION_FAST` - default behavior: s2n-tls will perform only the minimum validation required for safe use of the async pkey operation.
+ * - `S2N_ASYNC_PKEY_VALIDATION_STRICT` - in addition to the previous checks, s2n-tls will also ensure that the signature created as a result of the async private key sign operation matches the public key on the connection.
+ */
 typedef enum { S2N_ASYNC_PKEY_VALIDATION_FAST, S2N_ASYNC_PKEY_VALIDATION_STRICT } s2n_async_pkey_validation_mode;
 
+/**
+ * The type of private key operation
+ */
 typedef enum { S2N_ASYNC_DECRYPT, S2N_ASYNC_SIGN } s2n_async_pkey_op_type;
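To make the flow around these types concrete, a minimal sketch of the deferred pattern, under a few assumptions: a single-threaded application, the callback registered via `s2n_config_set_async_pkey_callback`, and `s2n_cert_chain_and_key_get_private_key` as the private key accessor (verify that name against this vendored header). The callback only captures the op; the work happens after `s2n_negotiate()` reports it is blocked:

#include <s2n.h>

/* Illustrative single-slot storage; a real application would scope this
 * to the connection (e.g. via connection context). */
static struct s2n_async_pkey_op *pending_op = NULL;

static int pkey_cb(struct s2n_connection *conn, struct s2n_async_pkey_op *op)
{
    pending_op = op; /* defer: do not perform the key operation here */
    return S2N_SUCCESS;
}

/* Later, in the negotiation loop, once s2n_negotiate() blocks on the app: */
static int complete_pending_op(struct s2n_connection *conn)
{
    struct s2n_cert_chain_and_key *chain = s2n_connection_get_selected_cert(conn);
    if (chain == NULL) {
        return S2N_FAILURE;
    }
    s2n_cert_private_key *key = s2n_cert_chain_and_key_get_private_key(chain);
    if (key == NULL
            || s2n_async_pkey_op_perform(pending_op, key) != S2N_SUCCESS
            || s2n_async_pkey_op_apply(pending_op, conn) != S2N_SUCCESS) {
        s2n_async_pkey_op_free(pending_op);
        return S2N_FAILURE;
    }
    return s2n_async_pkey_op_free(pending_op);
}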
 /**
@@ -1122,7 +2861,7 @@ extern int s2n_config_set_async_pkey_callback(struct s2n_config *config, s2n_asy
  *
  * Safe to call from a different thread, as long as no other thread is operating on `op`.
  *
  * @param op An opaque object representing the private key operation
- * @param s2n_cert_private_key The private key used for the operation. It can be extracted from
+ * @param key The private key used for the operation. It can be extracted from
  *   `conn` through the `s2n_connection_get_selected_cert` and `s2n_cert_chain_and_key_get_key` calls
  */
 S2N_API
@@ -1258,9 +2997,11 @@ typedef int (*s2n_key_log_fn)(void *ctx, struct s2n_connection *conn, uint8_t *l
 S2N_API
 extern int s2n_config_set_key_log_cb(struct s2n_config *config, s2n_key_log_fn callback, void *ctx);
 
-/* s2n_config_enable_cert_req_dss_legacy_compat adds a dss cert type in the server certificate request when being called.
+/**
+ * s2n_config_enable_cert_req_dss_legacy_compat adds a dss cert type in the server certificate request when being called.
  * It only sends the dss cert type in the cert request; the handshake will not succeed if a dss cert is received.
  * Please DO NOT call this API unless you know you actually need legacy DSS certificate type compatibility.
+ * @param config Config to enable legacy DSS certificates for
  */
 S2N_API
 extern int s2n_config_enable_cert_req_dss_legacy_compat(struct s2n_config *config);
@@ -1273,7 +3014,7 @@ extern int s2n_config_enable_cert_req_dss_legacy_compat(struct s2n_config *confi
  *
  * @param config A pointer to the config
  * @param max_early_data_size The maximum early data that the server will accept
- * @return A POSIX error signal. If successful, the maximum early data size was updated.
+ * @returns A POSIX error signal. If successful, the maximum early data size was updated.
  */
 S2N_API int s2n_config_set_server_max_early_data_size(struct s2n_config *config, uint32_t max_early_data_size);
@@ -1285,7 +3026,7 @@ S2N_API int s2n_config_set_server_max_early_data_size(struct s2n_config *config,
  *
  * @param conn A pointer to the connection
  * @param max_early_data_size The maximum early data the server will accept
- * @return A POSIX error signal. If successful, the maximum early data size was updated.
+ * @returns A POSIX error signal. If successful, the maximum early data size was updated.
  */
 S2N_API int s2n_connection_set_server_max_early_data_size(struct s2n_connection *conn, uint32_t max_early_data_size);
@@ -1302,7 +3043,7 @@ S2N_API int s2n_connection_set_server_max_early_data_size(struct s2n_connection
  *
  * @param conn A pointer to the connection
  * @param context A pointer to the user context data. This data will be copied.
  * @param context_size The size of the data to read from the `context` pointer.
- * @return A POSIX error signal. If successful, the context was updated.
+ * @returns A POSIX error signal. If successful, the context was updated.
  */
 S2N_API int s2n_connection_set_server_early_data_context(struct s2n_connection *conn, const uint8_t *context, uint16_t context_size);
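The three setters above compose naturally on the server side. A minimal sketch, assuming a TLS1.3-capable config; the 1 KB / 512 B limits and the context bytes are illustrative:

#include <s2n.h>

static int enable_server_early_data(struct s2n_config *config, struct s2n_connection *conn)
{
    const uint8_t context[] = "app-v1"; /* hypothetical versioning context */

    /* Config-wide ceiling for early data the server will accept. */
    if (s2n_config_set_server_max_early_data_size(config, 1024) != S2N_SUCCESS) {
        return S2N_FAILURE;
    }
    /* Optionally tighten the limit for this one connection... */
    if (s2n_connection_set_server_max_early_data_size(conn, 512) != S2N_SUCCESS) {
        return S2N_FAILURE;
    }
    /* ...and bind a context that issued session tickets will carry. */
    return s2n_connection_set_server_early_data_context(conn, context, sizeof(context) - 1);
}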
@@ -1319,7 +3060,7 @@ S2N_API int s2n_connection_set_server_early_data_context(struct s2n_connection
  * @param max_early_data_size The maximum early data that can be sent or received using this key.
  * @param cipher_suite_first_byte The first byte in the registered IANA value of the associated cipher suite.
  * @param cipher_suite_second_byte The second byte in the registered IANA value of the associated cipher suite.
- * @return A POSIX error signal. If successful, `psk` was updated.
+ * @returns A POSIX error signal. If successful, `psk` was updated.
  */
 S2N_API int s2n_psk_configure_early_data(struct s2n_psk *psk, uint32_t max_early_data_size, uint8_t cipher_suite_first_byte, uint8_t cipher_suite_second_byte);
@@ -1333,7 +3074,7 @@ S2N_API int s2n_psk_configure_early_data(struct s2n_psk *psk, uint32_t max_early
  * @param psk A pointer to the pre-shared key, created with `s2n_external_psk_new`.
  * @param application_protocol A pointer to the associated application protocol data. This data will be copied.
  * @param size The size of the data to read from the `application_protocol` pointer.
- * @return A POSIX error signal. If successful, the application protocol was set.
+ * @returns A POSIX error signal. If successful, the application protocol was set.
  */
 S2N_API int s2n_psk_set_application_protocol(struct s2n_psk *psk, const uint8_t *application_protocol, uint8_t size);
@@ -1346,11 +3087,12 @@ S2N_API int s2n_psk_set_application_protocol(struct s2n_psk *psk, const uint8_t
  * @param psk A pointer to the pre-shared key, created with `s2n_external_psk_new`.
  * @param context A pointer to the associated user context data. This data will be copied.
  * @param size The size of the data to read from the `context` pointer.
- * @return A POSIX error signal. If successful, the context was set.
+ * @returns A POSIX error signal. If successful, the context was set.
  */
 S2N_API int s2n_psk_set_early_data_context(struct s2n_psk *psk, const uint8_t *context, uint16_t size);
 
-/* The status of early data on a connection.
+/**
+ * The status of early data on a connection.
  *
  * S2N_EARLY_DATA_STATUS_OK: Early data is in progress.
  * S2N_EARLY_DATA_STATUS_NOT_REQUESTED: The client did not request early data, so none was sent or received.
@@ -1372,7 +3114,7 @@ typedef enum {
  *
  * @param conn A pointer to the connection
  * @param status A pointer which will be set to the current early data status
- * @return A POSIX error signal.
+ * @returns A POSIX error signal.
  */
 S2N_API int s2n_connection_get_early_data_status(struct s2n_connection *conn, s2n_early_data_status_t *status);
@@ -1385,7 +3127,7 @@ S2N_API int s2n_connection_get_early_data_status(struct s2n_connection *conn, s2
  *
  * @param conn A pointer to the connection
  * @param allowed_early_data_size A pointer which will be set to the remaining early data currently allowed by `conn`
- * @return A POSIX error signal.
+ * @returns A POSIX error signal.
  */
 S2N_API int s2n_connection_get_remaining_early_data_size(struct s2n_connection *conn, uint32_t *allowed_early_data_size);
@@ -1398,7 +3140,7 @@ S2N_API int s2n_connection_get_remaining_early_data_size(struct s2n_connection *
  *
  * @param conn A pointer to the connection
  * @param max_early_data_size A pointer which will be set to the maximum early data allowed by `conn`
- * @return A POSIX error signal.
+ * @returns A POSIX error signal.
  */
 S2N_API int s2n_connection_get_max_early_data_size(struct s2n_connection *conn, uint32_t *max_early_data_size);
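These getters are enough to report early data progress at runtime. A minimal sketch, assuming `conn` is mid- or post-negotiation:

#include <stdio.h>
#include <s2n.h>

static void report_early_data(struct s2n_connection *conn)
{
    s2n_early_data_status_t status = S2N_EARLY_DATA_STATUS_NOT_REQUESTED;
    uint32_t remaining = 0;

    if (s2n_connection_get_early_data_status(conn, &status) != S2N_SUCCESS
            || s2n_connection_get_remaining_early_data_size(conn, &remaining) != S2N_SUCCESS) {
        return;
    }
    if (status == S2N_EARLY_DATA_STATUS_OK) {
        /* Early data is in progress; `remaining` bytes are still allowed. */
        printf("early data in progress, %u bytes remaining\n", remaining);
    }
}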
@@ -1414,7 +3156,7 @@ S2N_API int s2n_connection_get_max_early_data_size(struct s2n_connection *conn,
  * @param data_len The size of the early data to send
  * @param data_sent A pointer which will be set to the size of the early data sent
  * @param blocked A pointer which will be set to the blocked status, as in `s2n_negotiate`.
- * @return A POSIX error signal. The error should be handled as in `s2n_negotiate`.
+ * @returns A POSIX error signal. The error should be handled as in `s2n_negotiate`.
  */
 S2N_API int s2n_send_early_data(struct s2n_connection *conn, const uint8_t *data, ssize_t data_len, ssize_t *data_sent, s2n_blocked_status *blocked);
@@ -1431,7 +3173,7 @@ S2N_API int s2n_send_early_data(struct s2n_connection *conn, const uint8_t *data
  * @param max_data_len The size of the early data buffer
  * @param data_received A pointer which will be set to the size of the early data received
  * @param blocked A pointer which will be set to the blocked status, as in `s2n_negotiate`.
- * @return A POSIX error signal. The error should be handled as in `s2n_negotiate`.
+ * @returns A POSIX error signal. The error should be handled as in `s2n_negotiate`.
  */
 S2N_API int s2n_recv_early_data(struct s2n_connection *conn, uint8_t *data, ssize_t max_data_len, ssize_t *data_received, s2n_blocked_status *blocked);
@@ -1453,16 +3195,16 @@ struct s2n_offered_early_data;
  * @param conn A pointer to the connection
  * @param early_data A pointer which can be used to access information about the proposed early data
  *        and then accept or reject it.
- * @return A POSIX error signal. If unsuccessful, the connection will be closed with an error.
+ * @returns A POSIX error signal. If unsuccessful, the connection will be closed with an error.
  */
 typedef int (*s2n_early_data_cb)(struct s2n_connection *conn, struct s2n_offered_early_data *early_data);
 
 /**
  * Set a callback to accept or reject early data.
  *
- * @param conn A pointer to the connection
+ * @param config A pointer to the connection config
  * @param cb A pointer to the implementation of the callback.
- * @return A POSIX error signal. If successful, the callback was set.
+ * @returns A POSIX error signal. If successful, the callback was set.
  */
 S2N_API int s2n_config_set_early_data_cb(struct s2n_config *config, s2n_early_data_cb cb);
@@ -1471,7 +3213,7 @@ S2N_API int s2n_config_set_early_data_cb(struct s2n_config *config, s2n_early_da
  *
  * @param early_data A pointer to the early data information
  * @param context_len The length of the user context
- * @return A POSIX error signal.
+ * @returns A POSIX error signal.
  */
 S2N_API int s2n_offered_early_data_get_context_length(struct s2n_offered_early_data *early_data, uint16_t *context_len);
@@ -1481,7 +3223,7 @@ S2N_API int s2n_offered_early_data_get_context_length(struct s2n_offered_early_d
  *
  * @param early_data A pointer to the early data information
  * @param context A byte buffer to copy the user context into
  * @param max_len The size of `context`. Must be >= to the result of `s2n_offered_early_data_get_context_length`.
- * @return A POSIX error signal.
+ * @returns A POSIX error signal.
  */
 S2N_API int s2n_offered_early_data_get_context(struct s2n_offered_early_data *early_data, uint8_t *context, uint16_t max_len);
@@ -1489,7 +3231,7 @@ S2N_API int s2n_offered_early_data_get_context(struct s2n_offered_early_data *ea
  * Reject early data offered by the client.
  *
  * @param early_data A pointer to the early data information
- * @return A POSIX error signal. If success, the client's early data will be rejected.
+ * @returns A POSIX error signal. If successful, the client's early data will be rejected.
  */
 S2N_API int s2n_offered_early_data_reject(struct s2n_offered_early_data *early_data);
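A minimal sketch of an early data callback wired up with `s2n_config_set_early_data_cb`, assuming the server bound an application-version context via `s2n_connection_set_server_early_data_context` as in the earlier sketch (the "app-v1" bytes are illustrative):

#include <string.h>
#include <s2n.h>

static int early_data_cb(struct s2n_connection *conn, struct s2n_offered_early_data *early_data)
{
    const uint8_t expected[] = "app-v1"; /* hypothetical context, as above */
    uint8_t actual[sizeof(expected)] = { 0 };
    uint16_t len = 0;

    if (s2n_offered_early_data_get_context_length(early_data, &len) != S2N_SUCCESS) {
        return S2N_FAILURE;
    }
    /* Reject early data whose context does not match the expected version. */
    if (len != sizeof(expected) - 1
            || s2n_offered_early_data_get_context(early_data, actual, sizeof(actual)) != S2N_SUCCESS
            || memcmp(actual, expected, len) != 0) {
        return s2n_offered_early_data_reject(early_data);
    }
    return s2n_offered_early_data_accept(early_data);
}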
@@ -1497,7 +3239,7 @@ S2N_API int s2n_offered_early_data_reject(struct s2n_offered_early_data *early_d
  * Accept early data offered by the client.
  *
  * @param early_data A pointer to the early data information
- * @return A POSIX error signal. If success, the client's early data will be accepted.
+ * @returns A POSIX error signal. If successful, the client's early data will be accepted.
  */
 S2N_API int s2n_offered_early_data_accept(struct s2n_offered_early_data *early_data);
diff --git a/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_rc4.c b/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_rc4.c
index 9c858ea11b..f5b2695da5 100644
--- a/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_rc4.c
+++ b/contrib/restricted/aws/s2n/crypto/s2n_stream_cipher_rc4.c
@@ -24,13 +24,18 @@
 static uint8_t s2n_stream_cipher_rc4_available()
 {
+#ifdef S2N_LIBCRYPTO_SUPPORTS_EVP_RC4
     if (s2n_is_in_fips_mode()) {
         return 0;
     } else {
         return (EVP_rc4() ? 1 : 0);
     }
+#else
+    return 0;
+#endif /* S2N_LIBCRYPTO_SUPPORTS_EVP_RC4 */
 }
 
+#ifdef S2N_LIBCRYPTO_SUPPORTS_EVP_RC4
 static int s2n_stream_cipher_rc4_encrypt(struct s2n_session_key *key, struct s2n_blob *in, struct s2n_blob *out)
 {
     POSIX_ENSURE_GTE(out->size, in->size);
@@ -84,6 +89,39 @@ static int s2n_stream_cipher_rc4_destroy_key(struct s2n_session_key *key)
     return 0;
 }
 
+#else
+
+static int s2n_stream_cipher_rc4_encrypt(struct s2n_session_key *key, struct s2n_blob *in, struct s2n_blob *out)
+{
+    POSIX_BAIL(S2N_ERR_UNIMPLEMENTED);
+}
+
+static int s2n_stream_cipher_rc4_decrypt(struct s2n_session_key *key, struct s2n_blob *in, struct s2n_blob *out)
+{
+    POSIX_BAIL(S2N_ERR_UNIMPLEMENTED);
+}
+
+static int s2n_stream_cipher_rc4_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in)
+{
+    POSIX_BAIL(S2N_ERR_UNIMPLEMENTED);
+}
+
+static int s2n_stream_cipher_rc4_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in)
+{
+    POSIX_BAIL(S2N_ERR_UNIMPLEMENTED);
+}
+
+static int s2n_stream_cipher_rc4_init(struct s2n_session_key *key)
+{
+    POSIX_BAIL(S2N_ERR_UNIMPLEMENTED);
+}
+
+static int s2n_stream_cipher_rc4_destroy_key(struct s2n_session_key *key)
+{
+    POSIX_BAIL(S2N_ERR_UNIMPLEMENTED);
+}
+
+#endif /* S2N_LIBCRYPTO_SUPPORTS_EVP_RC4 */
 
 struct s2n_cipher s2n_rc4 = {
     .type = S2N_STREAM,
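The RC4 change above follows a common guard-and-stub shape: probe the libcrypto feature at build time, compile the real implementation when the probe succeeds, and otherwise compile uniform failing stubs so the cipher table still links while the availability check reports 0. The same shape in miniature, with hypothetical names:

/* A minimal sketch of the guard-and-stub pattern; HAS_OPTIONAL_FEATURE and
 * feature_do_work are hypothetical, standing in for the libcrypto probe. */
#ifdef HAS_OPTIONAL_FEATURE
static int feature_do_work(int input)
{
    return input * 2; /* the real implementation, compiled only when supported */
}
#else
static int feature_do_work(int input)
{
    (void) input;
    return -1; /* uniform failure, analogous to POSIX_BAIL(S2N_ERR_UNIMPLEMENTED) */
}
#endif /* HAS_OPTIONAL_FEATURE */

Because the availability probe already returns 0 in the unsupported build, the stubs should never be selected at runtime; they exist only to keep the `struct s2n_cipher` table well-formed.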