| author | arcadia-devtools <arcadia-devtools@yandex-team.ru> | 2022-04-07 06:09:31 +0300 |
|---|---|---|
| committer | arcadia-devtools <arcadia-devtools@yandex-team.ru> | 2022-04-07 06:09:31 +0300 |
| commit | 059492ddc742f1b1819a2ecbbfda757799535ab4 (patch) | |
| tree | 29976963462a0f943b34db1332f8457bbda79f07 | |
| parent | 17bc45bc3648f65ad6329dcea38be4880d4b5782 (diff) | |
| download | ydb-059492ddc742f1b1819a2ecbbfda757799535ab4.tar.gz | |
intermediate changes
ref:53af8adc39d27babd820b9b416ba2ab4614152ca
12 files changed, 380 insertions, 65 deletions
diff --git a/build/rules/autocheck.blacklist b/build/rules/autocheck.blacklist index 44f4b48e54..11d1491b46 100644 --- a/build/rules/autocheck.blacklist +++ b/build/rules/autocheck.blacklist @@ -592,3 +592,7 @@ portal/archive/covers-static hypercube/museum afisha/infra/projects/deployer for-media/terminal-compare +portal/frontend/mumu +education/frontend-schools +for-media/download-regions-linguistics +market/front/libs/partner-release-checker diff --git a/build/ya.conf.json b/build/ya.conf.json index f5aaf64e4f..547896afad 100644 --- a/build/ya.conf.json +++ b/build/ya.conf.json @@ -8591,7 +8591,7 @@ }, "lama": { "formula": { - "sandbox_id": 1261784847, + "sandbox_id": 1268805461, "match": "lama" }, "executable": { diff --git a/contrib/python/boto3/py3/.dist-info/METADATA b/contrib/python/boto3/py3/.dist-info/METADATA index 9af67d3d5c..23a705e0c1 100644 --- a/contrib/python/boto3/py3/.dist-info/METADATA +++ b/contrib/python/boto3/py3/.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: boto3 -Version: 1.21.33 +Version: 1.21.34 Summary: The AWS SDK for Python Home-page: https://github.com/boto/boto3 Author: Amazon Web Services @@ -22,7 +22,7 @@ Classifier: Programming Language :: Python :: 3.10 Requires-Python: >= 3.6 License-File: LICENSE License-File: NOTICE -Requires-Dist: botocore (<1.25.0,>=1.24.33) +Requires-Dist: botocore (<1.25.0,>=1.24.34) Requires-Dist: jmespath (<2.0.0,>=0.7.1) Requires-Dist: s3transfer (<0.6.0,>=0.5.0) Provides-Extra: crt diff --git a/contrib/python/boto3/py3/boto3/__init__.py b/contrib/python/boto3/py3/boto3/__init__.py index f27afdf778..5cf9453e41 100644 --- a/contrib/python/boto3/py3/boto3/__init__.py +++ b/contrib/python/boto3/py3/boto3/__init__.py @@ -17,7 +17,7 @@ from boto3.compat import _warn_deprecated_python from boto3.session import Session __author__ = 'Amazon Web Services' -__version__ = '1.21.33' +__version__ = '1.21.34' # The default Boto3 session; autoloaded when needed. diff --git a/contrib/python/botocore/py3/.dist-info/METADATA b/contrib/python/botocore/py3/.dist-info/METADATA index e4ec251436..f4b157ba2d 100644 --- a/contrib/python/botocore/py3/.dist-info/METADATA +++ b/contrib/python/botocore/py3/.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: botocore -Version: 1.24.33 +Version: 1.24.34 Summary: Low-level, data-driven core of boto 3. 
Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/contrib/python/botocore/py3/botocore/__init__.py b/contrib/python/botocore/py3/botocore/__init__.py index c0320a3261..08c5f44155 100644 --- a/contrib/python/botocore/py3/botocore/__init__.py +++ b/contrib/python/botocore/py3/botocore/__init__.py @@ -16,7 +16,7 @@ import logging import os import re -__version__ = '1.24.33' +__version__ = '1.24.34' class NullHandler(logging.Handler): diff --git a/contrib/python/botocore/py3/botocore/compat.py b/contrib/python/botocore/py3/botocore/compat.py index bdefb914d4..bedcab26bd 100644 --- a/contrib/python/botocore/py3/botocore/compat.py +++ b/contrib/python/botocore/py3/botocore/compat.py @@ -19,6 +19,7 @@ import warnings import hashlib import logging import shlex +import re import os from math import floor @@ -359,3 +360,44 @@ try: HAS_CRT = not disabled.lower() == 'true' except ImportError: HAS_CRT = False + + +######################################################## +# urllib3 compat backports # +######################################################## + +# Vendoring IPv6 validation regex patterns from urllib3 +# https://github.com/urllib3/urllib3/blob/7e856c0/src/urllib3/util/url.py +IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}" +HEX_PAT = "[0-9A-Fa-f]{1,4}" +LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT) +_subs = {"hex": HEX_PAT, "ls32": LS32_PAT} +_variations = [ + # 6( h16 ":" ) ls32 + "(?:%(hex)s:){6}%(ls32)s", + # "::" 5( h16 ":" ) ls32 + "::(?:%(hex)s:){5}%(ls32)s", + # [ h16 ] "::" 4( h16 ":" ) ls32 + "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s", + # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 + "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s", + # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 + "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s", + # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 + "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s", + # [ *4( h16 ":" ) h16 ] "::" ls32 + "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s", + # [ *5( h16 ":" ) h16 ] "::" h16 + "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s", + # [ *6( h16 ":" ) h16 ] "::" + "(?:(?:%(hex)s:){0,6}%(hex)s)?::", +] + +UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~" +IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")" +ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+" +IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]" +IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$") + +# These are the characters that are stripped by post-bpo-43882 urlparse(). 
+UNSAFE_URL_CHARS = frozenset('\t\r\n') diff --git a/contrib/python/botocore/py3/botocore/data/datasync/2018-11-09/service-2.json b/contrib/python/botocore/py3/botocore/data/datasync/2018-11-09/service-2.json index 3621fc9c59..532a658a10 100644 --- a/contrib/python/botocore/py3/botocore/data/datasync/2018-11-09/service-2.json +++ b/contrib/python/botocore/py3/botocore/data/datasync/2018-11-09/service-2.json @@ -70,6 +70,20 @@ ], "documentation":"<p>Creates an endpoint for an Amazon FSx for Lustre file system.</p>" }, + "CreateLocationFsxOpenZfs":{ + "name":"CreateLocationFsxOpenZfs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLocationFsxOpenZfsRequest"}, + "output":{"shape":"CreateLocationFsxOpenZfsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"<p>Creates an endpoint for an Amazon FSx for OpenZFS file system.</p>" + }, "CreateLocationFsxWindows":{ "name":"CreateLocationFsxWindows", "http":{ @@ -250,7 +264,21 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"<p>Returns metadata, such as the path information about an Amazon FSx for Lustre location.</p>" + "documentation":"<p>Returns metadata about an Amazon FSx for Lustre location, such as information about its path.</p>" + }, + "DescribeLocationFsxOpenZfs":{ + "name":"DescribeLocationFsxOpenZfs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLocationFsxOpenZfsRequest"}, + "output":{"shape":"DescribeLocationFsxOpenZfsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"<p>Returns metadata about an Amazon FSx for OpenZFS location, such as information about its path.</p>" }, "DescribeLocationFsxWindows":{ "name":"DescribeLocationFsxWindows", @@ -264,7 +292,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"<p>Returns metadata, such as the path information about an Amazon FSx for Windows File Server location.</p>" + "documentation":"<p>Returns metadata about an Amazon FSx for Windows File Server location, such as information about its path.</p>" }, "DescribeLocationHdfs":{ "name":"DescribeLocationHdfs", @@ -759,6 +787,45 @@ } } }, + "CreateLocationFsxOpenZfsRequest":{ + "type":"structure", + "required":[ + "FsxFilesystemArn", + "Protocol", + "SecurityGroupArns" + ], + "members":{ + "FsxFilesystemArn":{ + "shape":"FsxFilesystemArn", + "documentation":"<p>The Amazon Resource Name (ARN) of the FSx for OpenZFS file system.</p>" + }, + "Protocol":{ + "shape":"FsxProtocol", + "documentation":"<p>The type of protocol that DataSync uses to access your file system.</p>" + }, + "SecurityGroupArns":{ + "shape":"Ec2SecurityGroupArnList", + "documentation":"<p>The ARNs of the security groups that are used to configure the FSx for OpenZFS file system.</p>" + }, + "Subdirectory":{ + "shape":"FsxOpenZfsSubdirectory", + "documentation":"<p>A subdirectory in the location's path that must begin with <code>/fsx</code>. DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location).</p>" + }, + "Tags":{ + "shape":"InputTagList", + "documentation":"<p>The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. 
We recommend that you create a name tag for your location.</p>" + } + } + }, + "CreateLocationFsxOpenZfsResponse":{ + "type":"structure", + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"<p>The ARN of the FSx for OpenZFS file system location that you created.</p>" + } + } + }, "CreateLocationFsxWindowsRequest":{ "type":"structure", "required":[ @@ -778,7 +845,7 @@ }, "SecurityGroupArns":{ "shape":"Ec2SecurityGroupArnList", - "documentation":"<p>The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for Windows File Server file system.</p>" + "documentation":"<p>The ARNs of the security groups that are used to configure the FSx for Windows File Server file system.</p>" }, "Tags":{ "shape":"InputTagList", @@ -803,7 +870,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"<p>The Amazon Resource Name (ARN) of the FSx for Windows File Server file system location that is created.</p>" + "documentation":"<p>The Amazon Resource Name (ARN) of the FSx for Windows File Server file system location you created.</p>" } } }, @@ -892,7 +959,7 @@ }, "ServerHostname":{ "shape":"ServerHostname", - "documentation":"<p>The name of the NFS server. This value is the IP address or Domain Name Service (DNS) name of the NFS server. An agent that is installed on-premises uses this host name to mount the NFS server in a network. </p> <p>If you are copying data to or from your Snowcone device, see <a href=\"https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone\">NFS Server on Snowcone</a> for more information.</p> <note> <p>This name must either be DNS-compliant or must be an IP version 4 (IPv4) address.</p> </note>" + "documentation":"<p>The name of the NFS server. This value is the IP address or Domain Name Service (DNS) name of the NFS server. An agent that is installed on-premises uses this hostname to mount the NFS server in a network. </p> <p>If you are copying data to or from your Snowcone device, see <a href=\"https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone\">NFS Server on Snowcone</a> for more information.</p> <note> <p>This name must either be DNS-compliant or must be an IP version 4 (IPv4) address.</p> </note>" }, "OnPremConfig":{ "shape":"OnPremConfig", @@ -929,7 +996,7 @@ "members":{ "ServerHostname":{ "shape":"ServerHostname", - "documentation":"<p>The name of the self-managed object storage server. This value is the IP address or Domain Name Service (DNS) name of the object storage server. An agent uses this host name to mount the object storage server in a network. </p>" + "documentation":"<p>The name of the self-managed object storage server. This value is the IP address or Domain Name Service (DNS) name of the object storage server. An agent uses this hostname to mount the object storage server in a network. </p>" }, "ServerPort":{ "shape":"ObjectStorageServerPort", @@ -998,7 +1065,7 @@ "S3Config":{"shape":"S3Config"}, "AgentArns":{ "shape":"AgentArnList", - "documentation":"<p>If you are using DataSync on an Amazon Web Services Outpost, specify the Amazon Resource Names (ARNs) of the DataSync agents deployed on your Outpost. 
For more information about launching a DataSync agent on an Amazon Web Services Outpost, see <a href=\"https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html#outposts-agent\">Deploy your DataSync agent on Outposts</a>.</p>" + "documentation":"<p>If you're using DataSync on an Amazon Web Services Outpost, specify the Amazon Resource Names (ARNs) of the DataSync agents deployed on your Outpost. For more information about launching a DataSync agent on an Amazon Web Services Outpost, see <a href=\"https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html#outposts-agent\">Deploy your DataSync agent on Outposts</a>.</p>" }, "Tags":{ "shape":"InputTagList", @@ -1282,6 +1349,41 @@ } } }, + "DescribeLocationFsxOpenZfsRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"<p>The Amazon Resource Name (ARN) of the FSx for OpenZFS location to describe.</p>" + } + } + }, + "DescribeLocationFsxOpenZfsResponse":{ + "type":"structure", + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"<p>The ARN of the FSx for OpenZFS location that was described.</p>" + }, + "LocationUri":{ + "shape":"LocationUri", + "documentation":"<p>The uniform resource identifier (URI) of the FSx for OpenZFS location that was described.</p> <p>Example: <code>fsxz://us-west-2.fs-1234567890abcdef02/fsx/folderA/folder</code> </p>" + }, + "SecurityGroupArns":{ + "shape":"Ec2SecurityGroupArnList", + "documentation":"<p>The ARNs of the security groups that are configured for the FSx for OpenZFS file system.</p>" + }, + "Protocol":{ + "shape":"FsxProtocol", + "documentation":"<p>The type of protocol that DataSync uses to access your file system.</p>" + }, + "CreationTime":{ + "shape":"Time", + "documentation":"<p>The time that the FSx for OpenZFS location was created.</p>" + } + } + }, "DescribeLocationFsxWindowsRequest":{ "type":"structure", "required":["LocationArn"], @@ -1706,14 +1808,14 @@ "members":{ "SubnetArn":{ "shape":"Ec2SubnetArn", - "documentation":"<p>The ARN of the subnet and the security group that DataSync uses to access the target EFS file system.</p>" + "documentation":"<p>The ARN of the subnet that DataSync uses to access the target EFS file system.</p>" }, "SecurityGroupArns":{ "shape":"Ec2SecurityGroupArnList", "documentation":"<p>The Amazon Resource Names (ARNs) of the security groups that are configured for the Amazon EC2 resource.</p>" } }, - "documentation":"<p>The subnet and the security group that DataSync uses to access target EFS file system. The subnet must have at least one mount target for that file system. The security group that you provide needs to be able to communicate with the security group on the mount target in the subnet specified. </p>" + "documentation":"<p>The subnet that DataSync uses to access target EFS file system. The subnet must have at least one mount target for that file system. The security group that you provide needs to be able to communicate with the security group on the mount target in the subnet specified. 
</p>" }, "Ec2SecurityGroupArn":{ "type":"string", @@ -1789,7 +1891,7 @@ }, "FilterValue":{ "type":"string", - "max":409600, + "max":102400, "pattern":"^[^\\x00]+$" }, "FilterValues":{ @@ -1806,6 +1908,28 @@ "max":4096, "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$" }, + "FsxOpenZfsSubdirectory":{ + "type":"string", + "max":4096, + "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,4096}$" + }, + "FsxProtocol":{ + "type":"structure", + "members":{ + "NFS":{ + "shape":"FsxProtocolNfs", + "documentation":"<p>Represents the Network File System (NFS) protocol that DataSync uses to access your FSx for OpenZFS file system.</p>" + } + }, + "documentation":"<p>Represents the protocol that DataSync uses to access your Amazon FSx for OpenZFS file system.</p>" + }, + "FsxProtocolNfs":{ + "type":"structure", + "members":{ + "MountOptions":{"shape":"NfsMountOptions"} + }, + "documentation":"<p>Represents the Network File System (NFS) protocol that DataSync uses to access your Amazon FSx for OpenZFS file system.</p>" + }, "FsxWindowsSubdirectory":{ "type":"string", "max":4096, @@ -2163,7 +2287,7 @@ }, "LocationUri":{ "shape":"LocationUri", - "documentation":"<p>Represents a list of URIs of a location. <code>LocationUri</code> returns an array that contains a list of locations when the <a href=\"https://docs.aws.amazon.com/datasync/latest/userguide/API_ListLocations.html\">ListLocations</a> operation is called.</p> <p>Format: <code>TYPE://GLOBAL_ID/SUBDIR</code>.</p> <p>TYPE designates the type of location. Valid values: NFS | EFS | S3.</p> <p>GLOBAL_ID is the globally unique identifier of the resource that backs the location. An example for EFS is <code>us-east-2.fs-abcd1234</code>. An example for Amazon S3 is the bucket name, such as <code>myBucket</code>. An example for NFS is a valid IPv4 address or a host name compliant with Domain Name Service (DNS).</p> <p>SUBDIR is a valid file system path, delimited by forward slashes as is the *nix convention. For NFS and Amazon EFS, it's the export path to mount the location. For Amazon S3, it's the prefix path that you mount to and treat as the root of the location.</p> <p/>" + "documentation":"<p>Represents a list of URIs of a location. <code>LocationUri</code> returns an array that contains a list of locations when the <a href=\"https://docs.aws.amazon.com/datasync/latest/userguide/API_ListLocations.html\">ListLocations</a> operation is called.</p> <p>Format: <code>TYPE://GLOBAL_ID/SUBDIR</code>.</p> <p>TYPE designates the type of location (for example, <code>nfs</code> or <code>s3</code>).</p> <p>GLOBAL_ID is the globally unique identifier of the resource that backs the location. An example for EFS is <code>us-east-2.fs-abcd1234</code>. An example for Amazon S3 is the bucket name, such as <code>myBucket</code>. An example for NFS is a valid IPv4 address or a hostname that is compliant with Domain Name Service (DNS).</p> <p>SUBDIR is a valid file system path, delimited by forward slashes as is the *nix convention. For NFS and Amazon EFS, it's the export path to mount the location. For Amazon S3, it's the prefix path that you mount to and treat as the root of the location.</p> <p/>" } }, "documentation":"<p>Represents a single entry in a list of locations. 
<code>LocationListEntry</code> returns an array that contains a list of locations when the <a href=\"https://docs.aws.amazon.com/datasync/latest/userguide/API_ListLocations.html\">ListLocations</a> operation is called.</p>" @@ -2171,7 +2295,7 @@ "LocationUri":{ "type":"string", "max":4356, - "pattern":"^(efs|nfs|s3|smb|fsxw|fsxl)://[a-zA-Z0-9.\\-]+$" + "pattern":"^(efs|nfs|s3|smb|hdfs|fsx[a-z0-9]+)://[a-zA-Z0-9.:/\\-]+$" }, "LogGroupArn":{ "type":"string", @@ -2453,10 +2577,10 @@ "members":{ "BucketAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"<p>The Amazon S3 bucket to access. This bucket is used as a parameter in the <a href=\"https://docs.aws.amazon.com/datasync/latest/userguide/API_CreateLocationS3.html\">CreateLocationS3</a> operation. </p>" + "documentation":"<p>The ARN of the IAM role for accessing the S3 bucket. </p>" } }, - "documentation":"<p>The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that is used to access an Amazon S3 bucket.</p> <p>For detailed information about using such a role, see Creating a Location for Amazon S3 in the <i>DataSync User Guide</i>.</p>" + "documentation":"<p>The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role used to access an Amazon S3 bucket.</p> <p>For detailed information about using such a role, see Creating a Location for Amazon S3 in the <i>DataSync User Guide</i>.</p>" }, "S3StorageClass":{ "type":"string", @@ -2899,11 +3023,11 @@ }, "KerberosKeytab":{ "shape":"KerberosKeytabFile", - "documentation":"<p>The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the encrypted keys. You can load the keytab from a file by providing the file's address. If you use the AWS CLI, it performs base64 encoding for you. Otherwise, provide the base64-encoded text.</p>" + "documentation":"<p>The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the encrypted keys. You can load the keytab from a file by providing the file's address. If you use the CLI, it performs base64 encoding for you. Otherwise, provide the base64-encoded text.</p>" }, "KerberosKrb5Conf":{ "shape":"KerberosKrb5ConfFile", - "documentation":"<p>The <code>krb5.conf</code> file that contains the Kerberos configuration information. You can load the <code>krb5.conf</code> file by providing the file's address. If you're using the AWS CLI, it performs the base64 encoding for you. Otherwise, provide the base64-encoded text.</p>" + "documentation":"<p>The <code>krb5.conf</code> file that contains the Kerberos configuration information. You can load the <code>krb5.conf</code> file by providing the file's address. If you're using the CLI, it performs the base64 encoding for you. Otherwise, provide the base64-encoded text.</p>" }, "AgentArns":{ "shape":"AgentArnList", diff --git a/contrib/python/botocore/py3/botocore/data/fsx/2018-03-01/service-2.json b/contrib/python/botocore/py3/botocore/data/fsx/2018-03-01/service-2.json index 9a34cdb1f4..bb702054d1 100644 --- a/contrib/python/botocore/py3/botocore/data/fsx/2018-03-01/service-2.json +++ b/contrib/python/botocore/py3/botocore/data/fsx/2018-03-01/service-2.json @@ -2809,7 +2809,7 @@ }, "Lifecycle":{ "shape":"FileSystemLifecycle", - "documentation":"<p>The lifecycle status of the file system. 
The following are the possible values and what they mean:</p> <ul> <li> <p> <code>AVAILABLE</code> - The file system is in a healthy state, and is reachable and available for use.</p> </li> <li> <p> <code>CREATING</code> - Amazon FSx is creating the new file system.</p> </li> <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing file system.</p> </li> <li> <p> <code>FAILED</code> - An existing file system has experienced an unrecoverable failure. When creating a new file system, Amazon FSx was unable to create the file system.</p> </li> <li> <p> <code>MISCONFIGURED</code> - The file system is in a failed but recoverable state.</p> </li> <li> <p> <code>UPDATING</code> - The file system is undergoing a customer-initiated update.</p> </li> </ul>" + "documentation":"<p>The lifecycle status of the file system. The following are the possible values and what they mean:</p> <ul> <li> <p> <code>AVAILABLE</code> - The file system is in a healthy state, and is reachable and available for use.</p> </li> <li> <p> <code>CREATING</code> - Amazon FSx is creating the new file system.</p> </li> <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing file system.</p> </li> <li> <p> <code>FAILED</code> - An existing file system has experienced an unrecoverable failure. When creating a new file system, Amazon FSx was unable to create the file system.</p> </li> <li> <p> <code>MISCONFIGURED</code> - The file system is in a failed but recoverable state.</p> </li> <li> <p> <code>MISCONFIGURED_UNAVAILABLE</code> - (Amazon FSx for Windows File Server only) The file system is currently unavailable due to a change in your Active Directory configuration.</p> </li> <li> <p> <code>UPDATING</code> - The file system is undergoing a customer-initiated update.</p> </li> </ul>" }, "FailureDetails":{"shape":"FileSystemFailureDetails"}, "StorageCapacity":{ @@ -2935,7 +2935,8 @@ "FAILED", "DELETING", "MISCONFIGURED", - "UPDATING" + "UPDATING", + "MISCONFIGURED_UNAVAILABLE" ] }, "FileSystemMaintenanceOperation":{ @@ -3869,7 +3870,7 @@ }, "Options":{ "shape":"RestoreOpenZFSVolumeOptions", - "documentation":"<p>The settings used when restoring the specified volume from snapshot. </p> <ul> <li> <p> <code>DELETE_INTERMEDIATE_SNAPSHOTS</code> - Deletes snapshots between the current state and the specified snapshot. If there are intermediate snapshots and this option isn't used, <code>RestoreVolumeFromSnapshot</code> fails.</p> </li> <li> <p> <code>DELETE_CLONED_VOLUMES</code> - Deletes any volumes cloned from this volume. If there are any cloned volumes and this option isn't used, <code>RestoreVolumeFromSnapshot</code> fails.</p> </li> </ul>" + "documentation":"<p>The settings used when restoring the specified volume from snapshot. </p> <ul> <li> <p> <code>DELETE_INTERMEDIATE_SNAPSHOTS</code> - Deletes snapshots between the current state and the specified snapshot. If there are intermediate snapshots and this option isn't used, <code>RestoreVolumeFromSnapshot</code> fails.</p> </li> <li> <p> <code>DELETE_CLONED_VOLUMES</code> - Deletes any dependent clone volumes created from intermediate snapshots. If there are any dependent clone volumes and this option isn't used, <code>RestoreVolumeFromSnapshot</code> fails.</p> </li> </ul>" } } }, @@ -4595,7 +4596,7 @@ }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", - "documentation":"<p>Specifies the throughput of an FSx for NetApp ONTAP file system, measured in megabytes per second (MBps). 
Valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.</p>" + "documentation":"<p>Specifies the throughput of an FSx for NetApp ONTAP file system, measured in megabytes per second (MBps). Valid values are 128, 256, 512, 1024, or 2048 MB/s.</p>" } }, "documentation":"<p>The configuration updates for an Amazon FSx for NetApp ONTAP file system.</p>" diff --git a/contrib/python/botocore/py3/botocore/data/s3control/2018-08-20/service-2.json b/contrib/python/botocore/py3/botocore/data/s3control/2018-08-20/service-2.json index 085065aeb6..d712b5d4ba 100644 --- a/contrib/python/botocore/py3/botocore/data/s3control/2018-08-20/service-2.json +++ b/contrib/python/botocore/py3/botocore/data/s3control/2018-08-20/service-2.json @@ -742,7 +742,7 @@ {"shape":"NotFoundException"}, {"shape":"TooManyTagsException"} ], - "documentation":"<p>Sets the supplied tag-set on an S3 Batch Operations job.</p> <p>A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html\">GetJobTagging</a>, modify that tag set, and use this action to replace the tag set with the one you modified. For more information, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags\">Controlling access and labeling jobs using tags</a> in the <i>Amazon S3 User Guide</i>. </p> <p/> <note> <ul> <li> <p>If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). 
For more information, see <a href=\"http://aws.amazon.com/s3/pricing/\">Amazon S3 pricing</a>.</p> </li> <li> <p>For deleting existing tags for your Batch Operations job, a <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html\">DeleteJobTagging</a> request is preferred because it achieves the same result without incurring charges.</p> </li> <li> <p>A few things to consider about using tags:</p> <ul> <li> <p>Amazon S3 limits the maximum number of tags to 50 tags per job.</p> </li> <li> <p>You can associate up to 50 tags with a job as long as they have unique tag keys.</p> </li> <li> <p>A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.</p> </li> <li> <p>The key and values are case sensitive.</p> </li> <li> <p>For tagging-related restrictions related to characters and encodings, see <a href=\"https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html\">User-Defined Tag Restrictions</a> in the <i>Billing and Cost Management User Guide</i>.</p> </li> </ul> </li> </ul> </note> <p/> <p>To use this action, you must have permission to perform the <code>s3:PutJobTagging</code> action.</p> <p>Related actions include:</p> <ul> <li> <p> <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html\">CreatJob</a> </p> </li> <li> <p> <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html\">GetJobTagging</a> </p> </li> <li> <p> <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html\">DeleteJobTagging</a> </p> </li> </ul>", + "documentation":"<p>Sets the supplied tag-set on an S3 Batch Operations job.</p> <p>A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html\">GetJobTagging</a>, modify that tag set, and use this action to replace the tag set with the one you modified. For more information, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags\">Controlling access and labeling jobs using tags</a> in the <i>Amazon S3 User Guide</i>. </p> <p/> <note> <ul> <li> <p>If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). 
For more information, see <a href=\"http://aws.amazon.com/s3/pricing/\">Amazon S3 pricing</a>.</p> </li> <li> <p>For deleting existing tags for your Batch Operations job, a <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html\">DeleteJobTagging</a> request is preferred because it achieves the same result without incurring charges.</p> </li> <li> <p>A few things to consider about using tags:</p> <ul> <li> <p>Amazon S3 limits the maximum number of tags to 50 tags per job.</p> </li> <li> <p>You can associate up to 50 tags with a job as long as they have unique tag keys.</p> </li> <li> <p>A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.</p> </li> <li> <p>The key and values are case sensitive.</p> </li> <li> <p>For tagging-related restrictions related to characters and encodings, see <a href=\"https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html\">User-Defined Tag Restrictions</a> in the <i>Billing and Cost Management User Guide</i>.</p> </li> </ul> </li> </ul> </note> <p/> <p>To use this action, you must have permission to perform the <code>s3:PutJobTagging</code> action.</p> <p>Related actions include:</p> <ul> <li> <p> <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html\">CreateJob</a> </p> </li> <li> <p> <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html\">GetJobTagging</a> </p> </li> <li> <p> <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html\">DeleteJobTagging</a> </p> </li> </ul>", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -4468,7 +4468,7 @@ }, "TargetKeyPrefix":{ "shape":"NonEmptyMaxLength1024String", - "documentation":"<p>Specifies the folder prefix into which you would like the objects to be copied. For example, to copy objects into a folder named \"Folder1\" in the destination bucket, set the TargetKeyPrefix to \"Folder1/\".</p>" + "documentation":"<p>Specifies the folder prefix into which you would like the objects to be copied. For example, to copy objects into a folder named <code>Folder1</code> in the destination bucket, set the TargetKeyPrefix to <code>Folder1</code>.</p>" }, "ObjectLockLegalHoldStatus":{ "shape":"S3ObjectLockLegalHoldStatus", diff --git a/contrib/python/botocore/py3/botocore/data/securityhub/2018-10-26/service-2.json b/contrib/python/botocore/py3/botocore/data/securityhub/2018-10-26/service-2.json index 14d5c9cf80..e13531710d 100644 --- a/contrib/python/botocore/py3/botocore/data/securityhub/2018-10-26/service-2.json +++ b/contrib/python/botocore/py3/botocore/data/securityhub/2018-10-26/service-2.json @@ -1647,10 +1647,36 @@ "AvailabilityZones":{ "shape":"AwsAutoScalingAutoScalingGroupAvailabilityZonesList", "documentation":"<p>The list of Availability Zones for the automatic scaling group.</p>" + }, + "LaunchTemplate":{ + "shape":"AwsAutoScalingAutoScalingGroupLaunchTemplateLaunchTemplateSpecification", + "documentation":"<p>The launch template to use.</p>" + }, + "CapacityRebalance":{ + "shape":"Boolean", + "documentation":"<p>Indicates whether capacity rebalancing is enabled. </p>" } }, "documentation":"<p>Provides details about an auto scaling group.</p>" }, + "AwsAutoScalingAutoScalingGroupLaunchTemplateLaunchTemplateSpecification":{ + "type":"structure", + "members":{ + "LaunchTemplateId":{ + "shape":"NonEmptyString", + "documentation":"<p>The identifier of the launch template. 
You must specify either <code>LaunchTemplateId</code> or <code>LaunchTemplateName</code>.</p>" + }, + "LaunchTemplateName":{ + "shape":"NonEmptyString", + "documentation":"<p>The name of the launch template. You must specify either <code>LaunchTemplateId</code> or <code>LaunchTemplateName</code>.</p>" + }, + "Version":{ + "shape":"NonEmptyString", + "documentation":"<p>Identifies the version of the launch template. You can specify a version identifier, or use the values <code>$Latest</code> or <code>$Default</code>.</p>" + } + }, + "documentation":"<p>Details about the launch template to use.</p>" + }, "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyDetails":{ "type":"structure", "members":{ @@ -1700,7 +1726,7 @@ "members":{ "LaunchTemplateSpecification":{ "shape":"AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateLaunchTemplateSpecification", - "documentation":"<p>The launch template to use.</p>" + "documentation":"<p>The launch template to use for a mixed instances policy.</p>" }, "Overrides":{ "shape":"AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateOverridesList", @@ -1725,7 +1751,7 @@ "documentation":"<p>Identifies the version of the launch template. You can specify a version identifier, or use the values <code>$Latest</code> or <code>$Default</code>.</p>" } }, - "documentation":"<p>Details about the launch template to use.</p>" + "documentation":"<p>Details about the launch template to use for a mixed instances policy.</p>" }, "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateOverridesList":{ "type":"list", @@ -2513,6 +2539,10 @@ "VpcConfig":{ "shape":"AwsCodeBuildProjectVpcConfig", "documentation":"<p>Information about the VPC configuration that CodeBuild accesses.</p>" + }, + "SecondaryArtifacts":{ + "shape":"AwsCodeBuildProjectArtifactsList", + "documentation":"<p>Information about the secondary artifacts for the CodeBuild project.</p>" } }, "documentation":"<p>Information about an CodeBuild project.</p>" @@ -5690,6 +5720,24 @@ }, "documentation":"<p>Contains information about the access log configuration for the load balancer.</p>" }, + "AwsElbLoadBalancerAdditionalAttribute":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"NonEmptyString", + "documentation":"<p>The name of the attribute.</p>" + }, + "Value":{ + "shape":"NonEmptyString", + "documentation":"<p>The value of the attribute.</p>" + } + }, + "documentation":"<p>Provides information about additional attributes for the load balancer.</p>" + }, + "AwsElbLoadBalancerAdditionalAttributeList":{ + "type":"list", + "member":{"shape":"AwsElbLoadBalancerAdditionalAttribute"} + }, "AwsElbLoadBalancerAttributes":{ "type":"structure", "members":{ @@ -5708,6 +5756,10 @@ "CrossZoneLoadBalancing":{ "shape":"AwsElbLoadBalancerCrossZoneLoadBalancing", "documentation":"<p>Cross-zone load balancing settings for the load balancer.</p> <p>If cross-zone load balancing is enabled, the load balancer routes the request traffic evenly across all instances regardless of the Availability Zones.</p>" + }, + "AdditionalAttributes":{ + "shape":"AwsElbLoadBalancerAdditionalAttributeList", + "documentation":"<p>Any additional attributes for a load balancer.</p>" } }, "documentation":"<p>Contains attributes for the load balancer.</p>" @@ -7800,6 +7852,84 @@ "type":"list", "member":{"shape":"AwsRdsDbProcessorFeature"} }, + "AwsRdsDbSecurityGroupDetails":{ + "type":"structure", + "members":{ + "DbSecurityGroupArn":{ + "shape":"NonEmptyString", + "documentation":"<p>The ARN for the DB security 
group.</p>" + }, + "DbSecurityGroupDescription":{ + "shape":"NonEmptyString", + "documentation":"<p>Provides the description of the DB security group.</p>" + }, + "DbSecurityGroupName":{ + "shape":"NonEmptyString", + "documentation":"<p>Specifies the name of the DB security group.</p>" + }, + "Ec2SecurityGroups":{ + "shape":"AwsRdsDbSecurityGroupEc2SecurityGroups", + "documentation":"<p>Contains a list of EC2 security groups.</p>" + }, + "IpRanges":{ + "shape":"AwsRdsDbSecurityGroupIpRanges", + "documentation":"<p>Contains a list of IP ranges.</p>" + }, + "OwnerId":{ + "shape":"NonEmptyString", + "documentation":"<p>Provides the Amazon Web Services ID of the owner of a specific DB security group.</p>" + }, + "VpcId":{ + "shape":"NonEmptyString", + "documentation":"<p>Provides VPC ID associated with the DB security group. </p>" + } + }, + "documentation":"<p>Provides information about an Amazon RDS DB security group.</p>" + }, + "AwsRdsDbSecurityGroupEc2SecurityGroup":{ + "type":"structure", + "members":{ + "Ec2SecurityGroupId":{ + "shape":"NonEmptyString", + "documentation":"<p>Specifies the ID for the EC2 security group.</p>" + }, + "Ec2SecurityGroupName":{ + "shape":"NonEmptyString", + "documentation":"<p>Specifies the name of the EC2 security group.</p>" + }, + "Ec2SecurityGroupOwnerId":{ + "shape":"NonEmptyString", + "documentation":"<p>Provides the Amazon Web Services ID of the owner of the EC2 security group.</p>" + }, + "Status":{ + "shape":"NonEmptyString", + "documentation":"<p>Provides the status of the EC2 security group.</p>" + } + }, + "documentation":"<p>EC2 security group information for an RDS DB security group.</p>" + }, + "AwsRdsDbSecurityGroupEc2SecurityGroups":{ + "type":"list", + "member":{"shape":"AwsRdsDbSecurityGroupEc2SecurityGroup"} + }, + "AwsRdsDbSecurityGroupIpRange":{ + "type":"structure", + "members":{ + "CidrIp":{ + "shape":"NonEmptyString", + "documentation":"<p>Specifies the IP range.</p>" + }, + "Status":{ + "shape":"NonEmptyString", + "documentation":"<p>Specifies the status of the IP range.</p>" + } + }, + "documentation":"<p>IP range information for an RDS DB security group.</p>" + }, + "AwsRdsDbSecurityGroupIpRanges":{ + "type":"list", + "member":{"shape":"AwsRdsDbSecurityGroupIpRange"} + }, "AwsRdsDbSnapshotDetails":{ "type":"structure", "members":{ @@ -8364,6 +8494,10 @@ "VpcSecurityGroups":{ "shape":"AwsRedshiftClusterVpcSecurityGroups", "documentation":"<p>The list of VPC security groups that the cluster belongs to, if the cluster is in a VPC.</p>" + }, + "LoggingStatus":{ + "shape":"AwsRedshiftClusterLoggingStatus", + "documentation":"<p>Information about the logging status of the cluster.</p>" } }, "documentation":"<p>Details about an Amazon Redshift cluster.</p>" @@ -8432,6 +8566,36 @@ "type":"list", "member":{"shape":"AwsRedshiftClusterIamRole"} }, + "AwsRedshiftClusterLoggingStatus":{ + "type":"structure", + "members":{ + "BucketName":{ + "shape":"NonEmptyString", + "documentation":"<p>The name of the S3 bucket where the log files are stored.</p>" + }, + "LastFailureMessage":{ + "shape":"NonEmptyString", + "documentation":"<p>The message indicating that the logs failed to be delivered.</p>" + }, + "LastFailureTime":{ + "shape":"NonEmptyString", + "documentation":"<p>The last time when logs failed to be delivered.</p> <p>Uses the <code>date-time</code> format specified in <a href=\"https://tools.ietf.org/html/rfc3339#section-5.6\">RFC 3339 section 5.6, Internet Date/Time Format</a>. The value cannot contain spaces. 
For example, <code>2020-03-22T13:22:13.933Z</code>.</p>" + }, + "LastSuccessfulDeliveryTime":{ + "shape":"NonEmptyString", + "documentation":"<p>The last time that logs were delivered successfully.</p> <p>Uses the <code>date-time</code> format specified in <a href=\"https://tools.ietf.org/html/rfc3339#section-5.6\">RFC 3339 section 5.6, Internet Date/Time Format</a>. The value cannot contain spaces. For example, <code>2020-03-22T13:22:13.933Z</code>.</p>" + }, + "LoggingEnabled":{ + "shape":"Boolean", + "documentation":"<p>Indicates whether logging is enabled.</p>" + }, + "S3KeyPrefix":{ + "shape":"NonEmptyString", + "documentation":"<p>Provides the prefix applied to the log file names.</p>" + } + }, + "documentation":"<p>Provides information about the logging status of the cluster.</p>" + }, "AwsRedshiftClusterPendingModifiedValues":{ "type":"structure", "members":{ @@ -12982,6 +13146,10 @@ "AwsNetworkFirewallRuleGroup":{ "shape":"AwsNetworkFirewallRuleGroupDetails", "documentation":"<p>Details about an Network Firewall rule group.</p>" + }, + "AwsRdsDbSecurityGroup":{ + "shape":"AwsRdsDbSecurityGroupDetails", + "documentation":"<p>Details about an Amazon RDS DB security group.</p>" } }, "documentation":"<p>Additional details about a resource related to a finding.</p> <p>To provide the details, use the object that corresponds to the resource type. For example, if the resource type is <code>AwsEc2Instance</code>, then you use the <code>AwsEc2Instance</code> object to provide the details.</p> <p>If the type-specific object does not contain all of the fields you want to populate, then you use the <code>Other</code> object to populate those additional fields.</p> <p>You also use the <code>Other</code> object to populate the details when the selected type does not have a corresponding object.</p>" diff --git a/contrib/python/botocore/py3/botocore/utils.py b/contrib/python/botocore/py3/botocore/utils.py index e177b0a6d6..49aef2f299 100644 --- a/contrib/python/botocore/py3/botocore/utils.py +++ b/contrib/python/botocore/py3/botocore/utils.py @@ -32,6 +32,17 @@ from urllib3.exceptions import LocationParseError import botocore import botocore.awsrequest import botocore.httpsession + +# IP Regexes retained for backwards compatibility +from botocore.compat import HEX_PAT # noqa: F401 +from botocore.compat import IPV4_PAT # noqa: F401 +from botocore.compat import IPV6_ADDRZ_PAT # noqa: F401 +from botocore.compat import IPV6_ADDRZ_RE # noqa: F401 +from botocore.compat import IPV6_PAT # noqa: F401 +from botocore.compat import LS32_PAT # noqa: F401 +from botocore.compat import UNRESERVED_PAT # noqa: F401 +from botocore.compat import UNSAFE_URL_CHARS # noqa: F401 +from botocore.compat import ZONE_ID_PAT # noqa: F401 from botocore.compat import ( HAS_CRT, MD5_AVAILABLE, @@ -166,41 +177,6 @@ EVENT_ALIASES = { "tagging": "resource-groups-tagging-api" } -# Vendoring IPv6 validation regex patterns from urllib3 -# https://github.com/urllib3/urllib3/blob/7e856c0/src/urllib3/util/url.py -IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}" -HEX_PAT = "[0-9A-Fa-f]{1,4}" -LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT) -_subs = {"hex": HEX_PAT, "ls32": LS32_PAT} -_variations = [ - # 6( h16 ":" ) ls32 - "(?:%(hex)s:){6}%(ls32)s", - # "::" 5( h16 ":" ) ls32 - "::(?:%(hex)s:){5}%(ls32)s", - # [ h16 ] "::" 4( h16 ":" ) ls32 - "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s", - # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 - "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s", - # [ *2( h16 ":" ) h16 ] "::" 2( 
h16 ":" ) ls32 - "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s", - # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 - "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s", - # [ *4( h16 ":" ) h16 ] "::" ls32 - "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s", - # [ *5( h16 ":" ) h16 ] "::" h16 - "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s", - # [ *6( h16 ":" ) h16 ] "::" - "(?:(?:%(hex)s:){0,6}%(hex)s)?::", -] - -UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~" -IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")" -ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+" -IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]" -IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$") - -# These are the characters that are stripped by post-bpo-43882 urlparse(). -UNSAFE_URL_CHARS = frozenset('\t\r\n') # This pattern can be used to detect if a header is a flexible checksum header CHECKSUM_HEADER_PATTERN = re.compile( |